def assign_underlay_ip(self, links, ip_subnet_start, filter_device=None):
    """
    Incrementally picks a subnet starting from `ip_subnet_start` for each
    link and assigns an IP address to each interface.

    :param links: a list of links. Each link must be represented as a dict.
        The dict format is the same as the return value, but without the
        ip_address attributes
    :param ip_subnet_start: the first subnet (CIDR string) to allocate from
    :param filter_device: a device name. If present, the return value is
        filtered to return only the links belonging to this device
    :return: list of links. Each link is a dict:
        {
            source: {
                node: {name: str}
                interface: {name: str, unit: str, ip_address: ip/mask}
            },
            target: {
                node: {name: str}
                interface: {name: str, unit: str, ip_address: ip/mask}
            }
        }
    """
    # Iterate over links and assign IP addresses
    ip_subnet = IPNetwork(ip_subnet_start)
    ip_netmask = ip_subnet.prefixlen
    for link in links:
        hosts = list(ip_subnet.iter_hosts())
        # First host IP of the subnet is assigned to the source
        link["source"]["interface"]["ip_address"] = "{}/{}".format(
            str(hosts[0]), ip_netmask)
        # Second host IP of the subnet is assigned to the target
        link["target"]["interface"]["ip_address"] = "{}/{}".format(
            str(hosts[1]), ip_netmask)
        ip_subnet = ip_subnet.next(step=1)
    if filter_device:
        # Filter the result to only include links belonging to the device
        links = [
            l for l in links
            if l["source"]["node"]["name"] == filter_device
            or l["target"]["node"]["name"] == filter_device
        ]
    return links
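A minimal sketch of the subnet-stepping idiom the method relies on; the addresses here are arbitrary. Each /30 yields exactly two usable hosts, one per link end, and IPNetwork.next(step=1) moves to the adjacent subnet of the same size:

from netaddr import IPNetwork

subnet = IPNetwork("10.0.0.0/30")
for _ in range(3):
    # the two usable hosts of the current /30
    source_ip, target_ip = list(subnet.iter_hosts())[:2]
    print("{}/{} {}/{}".format(source_ip, subnet.prefixlen,
                               target_ip, subnet.prefixlen))
    subnet = subnet.next(step=1)  # adjacent /30: 10.0.0.4/30, 10.0.0.8/30, ...
# 10.0.0.1/30 10.0.0.2/30
# 10.0.0.5/30 10.0.0.6/30
# 10.0.0.9/30 10.0.0.10/30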
def test_lease_create():
    global api
    global network_private
    global network_public
    # Lease one address per /30. The original condition compared an
    # IPNetwork to an IPAddress, which never compares equal in netaddr;
    # comparing against the /30 network boundary keeps the apparent intent.
    net_addr = IPNetwork(network_public.address)
    for host in net_addr.iter_hosts():
        if IPNetwork('%s/30' % str(host + 2)).network == host:
            network_public.lease_create(host)
    net_addr = IPNetwork(network_private.address)
    for host in net_addr.iter_hosts():
        if IPNetwork('%s/30' % str(host + 2)).network == host:
            network_private.lease_create(host)
class Subnets(object):
    def __init__(self, subnet):
        self.subnet = subnet
        self._prefix = IPNetwork(self.subnet)  # fixed typo: was `self.pefix`

    @property
    def prefix(self):
        return self._prefix

    @property
    def hosts(self):
        return self._prefix.iter_hosts()

    @property
    def netmask(self):
        return self._prefix.netmask

    @property
    def sub_network(self):
        return self._prefix.network

    @property
    def range(self):
        ip_list = list(self.hosts)
        return str(ip_list[0]) + '#' + str(len(ip_list))

    @property
    def range2(self):
        ip_list = list(self.hosts)
        # vmware adds the count of IPs to the first given IP
        count = len(ip_list) - 2
        return str(ip_list[2]) + '#' + str(count)
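The '#'-delimited range strings the class produces can be checked standalone; a /29 is assumed here:

from netaddr import IPNetwork

hosts = list(IPNetwork("192.168.0.0/29").iter_hosts())  # 6 usable hosts
print(str(hosts[0]) + '#' + str(len(hosts)))      # range:  192.168.0.1#6
print(str(hosts[2]) + '#' + str(len(hosts) - 2))  # range2: 192.168.0.3#4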
def _add_search_filters(filters, query): """ Add SearchService response to filters """ search_query = query if IP_CIDR_RE.match(query): try: # Try to parse IP/CIDR search network = IPNetwork(query) if network.size <= 4096: search_query = " ".join( [str(host) for host in network.iter_hosts()]) search_query = search_query if search_query else query except (AttributeError, IndexError, AddrFormatError, AddrConversionError): pass try: reports = SearchService.search_reports(search_query) if not reports: reports = [None] except SearchServiceException: return if "in" in filters["where"]: for field in filters["where"]["in"]: for key, values in field.iteritems(): if key == "reportTicket__id" and values: reports.extend(values) filters["where"]["in"].remove({key: values}) filters["where"]["in"].append({"reportTicket__id": reports}) else: filters["where"]["in"] = [{"reportTicket__id": reports}]
def _validateLoopbackPrefix(self, pod, podDict, inventoryData): inventoryDeviceCount = len(inventoryData['spines']) + len(inventoryData['leafs']) lo0Block = IPNetwork(podDict['loopbackPrefix']) lo0Ips = list(lo0Block.iter_hosts()) availableIps = len(lo0Ips) if availableIps < inventoryDeviceCount: raise ValueError("Pod[id='%s', name='%s']: loopbackPrefix available IPs %d not enough: required %d" % (pod.id, pod.name, availableIps, inventoryDeviceCount))
def reserve_ip_addr(self):
    """Picks the first available IP address from the weave network.

    If there is no available IP address left, ``IndexError`` will be
    raised. To prevent this error, catch the exception or check the value
    of ``GluuCluster.ip_addr_available`` before calling this method.

    :returns: A 2-element tuple consisting of IP address and network
        prefix, e.g. ``("10.10.10.1", 24)``.
    """
    # represents a pool of IP addresses
    pool = IPNetwork(self.weave_ip_network)

    # the possible IP address range, excluding already-reserved
    # addresses and the exposed weave IP
    ip_range = (IPSet(pool.iter_hosts())
                ^ IPSet(self.reserved_ip_addrs)
                ^ IPSet([self.exposed_weave_ip[0]]))

    # retrieves the first IP address from ``ip_range``
    ip_addr = list(itertools.islice(ip_range, 1))[0]

    # register the IP address so it will be excluded
    # from the possible IP range in subsequent requests
    self.reserved_ip_addrs.append(str(ip_addr))

    # a weave IP address for a container expects a traditional CIDR,
    # e.g. 10.10.10.1/24, hence we return the actual IP and
    # its prefix length
    return str(ip_addr), pool.prefixlen
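The pool/exclusion idiom reduces to a few lines; a sketch with made-up addresses, using plain set difference instead of the `^` above (equivalent only while the excluded addresses all lie inside the pool):

import itertools
from netaddr import IPNetwork, IPSet

pool = IPNetwork("10.10.10.0/24")
reserved = IPSet(["10.10.10.1"])        # already handed out
exposed = IPSet(["10.10.10.254"])       # weave's own exposed address
free = IPSet(pool.iter_hosts()) - reserved - exposed
ip_addr = list(itertools.islice(free, 1))[0]
print(str(ip_addr), pool.prefixlen)     # 10.10.10.2 24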
def _add_search_filters(filters, query): """ Add Search Service response to filters """ search_query = query # Try to parse IP/CIDR search if IP_CIDR_RE.match(query): try: network = IPNetwork(query) if network.size <= 4096: search_query = ' '.join([str(host) for host in network.iter_hosts()]) search_query = search_query if search_query else query except (AttributeError, IndexError, AddrFormatError, AddrConversionError): pass try: reports = ImplementationFactory.instance.get_singleton_of('SearchServiceBase').search_reports(search_query) if not reports: reports = [None] except SearchServiceException: return if 'in' in filters['where']: for field in filters['where']['in']: for key, values in field.iteritems(): if key == 'id' and len(values): reports.extend(values) filters['where']['in'].remove({key: values}) filters['where']['in'].append({'id': list(set(reports))}) else: filters['where']['in'] = [{'id': reports}]
def populateDhcpGlobalSettings(self):
    ztp = {}
    ztpGlobalSettings = util.loadClosDefinition()['ztp']
    subnet = ztpGlobalSettings['dhcpSubnet']
    dhcpBlock = IPNetwork(subnet)
    ipList = list(dhcpBlock.iter_hosts())
    ztp['network'] = str(dhcpBlock.network)
    ztp['netmask'] = str(dhcpBlock.netmask)

    ztp['defaultRoute'] = ztpGlobalSettings.get('dhcpOptionRoute')
    if ztp['defaultRoute'] is None or ztp['defaultRoute'] == '':
        ztp['defaultRoute'] = str(ipList[0])

    ztp['rangeStart'] = ztpGlobalSettings.get('dhcpOptionRangeStart')
    if ztp['rangeStart'] is None or ztp['rangeStart'] == '':
        ztp['rangeStart'] = str(ipList[1])

    ztp['rangeEnd'] = ztpGlobalSettings.get('dhcpOptionRangeEnd')
    if ztp['rangeEnd'] is None or ztp['rangeEnd'] == '':
        ztp['rangeEnd'] = str(ipList[-1])

    ztp['broadcast'] = str(dhcpBlock.broadcast)
    ztp['httpServerIp'] = self.__conf['httpServer']['ipAddr']
    if ztpGlobalSettings.get('junosImage') is not None:
        # don't start the URL with a leading '/' (as in '/openclos/...');
        # a leading '/' breaks ZTP
        ztp['imageUrl'] = 'openclos/images/' + ztpGlobalSettings.get('junosImage')

    return ztp
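All the fallbacks above are positional picks from the subnet; for an assumed /24 they work out as follows:

from netaddr import IPNetwork

block = IPNetwork("192.168.48.0/24")
hosts = list(block.iter_hosts())
print(str(block.network), str(block.netmask))  # 192.168.48.0 255.255.255.0
print(str(hosts[0]))    # defaultRoute fallback: 192.168.48.1
print(str(hosts[1]))    # rangeStart fallback:   192.168.48.2
print(str(hosts[-1]))   # rangeEnd fallback:     192.168.48.254
print(str(block.broadcast))                    # 192.168.48.255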
def assign_group_to_cidr(deployment_group, cidr, connection_params): network = IPNetwork(cidr) client = mazerunner.connect(**connection_params) group = find_deployment_group(client, deployment_group) for address in network.iter_hosts(): name = get_hostname_for_ip(str(address)) if not name: print "Could not resolve hostname for {}".format(address) continue # Find the endpoint object endpoints = client.endpoints.filter(name) e = find_endpoint(endpoints, name) if not e: print "Could not find endpoint object for {}".format(name) continue try: client.endpoints.reassign_to_group(group, [e]) except mazerunner.exceptions.ValidationError: # Workaround... pass print "Endpoint at {} ({}) assigned to group {}".format( address, name, deployment_group)
def test_lease_create(): global api global network net_addr = IPNetwork(network.address + '/' + str(network.mask)) for host in net_addr.iter_hosts(): network.lease_create(host)
def _start_searching(self, network_card):
    """ Searches the selected network card's subnet for responding devices. """
    network = IPNetwork('/'.join(
        [network_card["Gateway"], network_card["Mask"]]))
    for host in network.iter_hosts():
        host = str(host)
        if self._FINISH_SEARCHING:
            self.change_search_button_off(True, False)
            break
        if host in self.assigned_devices or host == network_card["IP Address"]:
            continue
        # `ping -n 1` is the Windows single-ping invocation
        response = subprocess.Popen(
            'ping -n 1 {}'.format(host),
            stdout=subprocess.PIPE).communicate()[0]
        if "unreachable" in str(response) or "Request timed out" in str(response):
            continue
        self.insert_device(self.selected_netcard, host)
        self.assigned_devices.append(host)
        self.refresh_searching()
    self._FINISH_SEARCHING = False
    self.change_search_button_off(True, False)
    return
def populateDhcpGlobalSettings(self): ztp = {} ztpGlobalSettings = util.loadClosDefinition()['ztp'] subnet = ztpGlobalSettings['dhcpSubnet'] dhcpBlock = IPNetwork(subnet) ipList = list(dhcpBlock.iter_hosts()) ztp['network'] = str(dhcpBlock.network) ztp['netmask'] = str(dhcpBlock.netmask) ztp['defaultRoute'] = ztpGlobalSettings.get('dhcpOptionRoute') if ztp['defaultRoute'] is None or ztp['defaultRoute'] == '': ztp['defaultRoute'] = str(ipList[0]) ztp['rangeStart'] = ztpGlobalSettings.get('dhcpOptionRangeStart') if ztp['rangeStart'] is None or ztp['rangeStart'] == '': ztp['rangeStart'] = str(ipList[1]) ztp['rangeEnd'] = ztpGlobalSettings.get('dhcpOptionRangeEnd') if ztp['rangeEnd'] is None or ztp['rangeEnd'] == '': ztp['rangeEnd'] = str(ipList[-1]) ztp['broadcast'] = str(dhcpBlock.broadcast) ztp['httpServerIp'] = self.conf['httpServer']['ipAddr'] ztp['imageUrl'] = ztpGlobalSettings.get('junosImage') return ztp
def __init__(self, params):
    BasePayload.__init__(self, params)

    try:
        from netaddr import IPNetwork
        from netaddr.core import AddrFormatError

        net = IPNetwork("%s" % self.params["net"])
        self.f = net.iter_hosts()
        # exclude the network and broadcast addresses from the count
        self.__count = net.size - 2

        if self.__count <= 0:
            raise FuzzExceptPluginBadParams(
                "There are no hosts in the specified network")
    except ValueError:
        raise FuzzExceptPluginBadParams(
            "The specified network has an incorrect format.")
    except ImportError:
        raise FuzzExceptBadInstall(
            "ipnet plugin requires the netaddr module. Please install it using pip."
        )
    except AddrFormatError:
        raise FuzzExceptPluginBadParams(
            "The specified network has an incorrect format.")
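The host count here is simply the network size minus the network and broadcast addresses, matching what iter_hosts() will actually yield:

from netaddr import IPNetwork

net = IPNetwork("192.168.0.0/29")
print(net.size - 2)                 # 6
print(len(list(net.iter_hosts())))  # 6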
def _validateLoopbackPrefix(self, pod, podDict, inventoryData): inventoryDeviceCount = len(inventoryData['spines']) + len(inventoryData['leafs']) lo0Block = IPNetwork(podDict['loopbackPrefix']) lo0Ips = list(lo0Block.iter_hosts()) availableIps = len(lo0Ips) cidr = 32 - int(math.ceil(math.log(inventoryDeviceCount, 2))) if availableIps < inventoryDeviceCount: raise InsufficientLoopbackIp("Pod[id='%s', name='%s']: loopbackPrefix minimum required: %s/%d" % (pod.id, pod.name, lo0Block.ip, cidr))
def _allocateLoopback(self, session, pod, loopbackPrefix, devices): loopbackIp = IPNetwork(loopbackPrefix).network numOfIps = len(devices) + 2 # +2 for network and broadcast numOfBits = int(math.ceil(math.log(numOfIps, 2))) cidr = 32 - numOfBits lo0Block = IPNetwork(str(loopbackIp) + "/" + str(cidr)) lo0Ips = list(lo0Block.iter_hosts()) pod.allocatedLoopbackBlock = str(lo0Block.cidr) self._assignAllocatedLoopbackToDevices(session, devices, lo0Ips)
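The block-sizing arithmetic is worth seeing in isolation; the device count below is an assumption:

import math
from netaddr import IPNetwork

devices = 6                # hypothetical device count
num_of_ips = devices + 2   # +2 for network and broadcast
cidr = 32 - int(math.ceil(math.log(num_of_ips, 2)))
block = IPNetwork("10.0.0.0/%d" % cidr)
print(block.cidr, len(list(block.iter_hosts())))  # 10.0.0.0/29 6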
def add_target(self, target): try: IP(target) address = target except Exception: address = None if address is not None: # Keep the IP address. self.__addresses.add(address) # If it's an IP network... else: try: network = IPNetwork(target) except Exception: ##raise # XXX DEBUG network = None if network is not None: # For each host IP address in range... for address in network.iter_hosts(): address = str(address) # Keep the IP address. self.__addresses.add(address) #domain elif self._re_is_domain.match(target): target = target.lower() if target not in self.__domains: # Keep the domain name. self.__domains.add(target) else: try: parsed_url = ParsedURL(target) url = parsed_url.url except Exception: url = None if url is not None: self.__web_pages.add(url) self.__target_url.add(url) host = parsed_url.host try: if host.startswith("[") and host.endswith("]"): IPAddress(host[1:-1], version=6) host = host[1:-1] else: IPAddress(host) self.__addresses.add(host) except Exception: ##raise # XXX DEBUG host = host.lower() if host not in self.__domains: self.__domains.add(host) else: raise ValueError("I don't know what to do with this: %s" % target)
def targets(self, targets): # Always append, never overwrite. # Fix target URLs if the scheme part is missing. # Make sure self._targets contains a list. self._targets = getattr(self, "_targets", []) # Ignore the trivial case. if not targets: return # Strip whitespace. targets = [ x.strip() for x in targets if x not in self._targets ] # Remove duplicates. targets = [ x for x in set(targets) if x not in self._targets ] # Encode all Unicode strings as UTF-8. targets = [ x.encode("UTF-8") if isinstance(x, unicode) else str(x) for x in targets if x not in self._targets ] # Detect network ranges, like 30.30.30.0/24, and get all IPs on it. parsed_targets = [] for host in targets: # Try to parse the address as a network range. try: tmp_target = IPNetwork(host) except: parsed_targets.append(host) continue # If it's a range, iterate it and get all IP addresses. # If it's a single IP address, just add it. if tmp_target.size != 1: parsed_targets.extend( str(x) for x in tmp_target.iter_hosts() ) else: parsed_targets.append( str(tmp_target.ip) ) # Add the new targets. self._targets.extend(parsed_targets)
def yield_ping_parameters(to_scan): """Yields each (interface, ip) pair to scan. :param to_scan: dict of {<interface-name>: <iterable-of-cidr-strings>}. """ for interface in to_scan: for cidr in to_scan[interface]: ipnetwork = IPNetwork(cidr) if ipnetwork.version == 4: for ip in ipnetwork.iter_hosts(): yield PingParameters(interface, str(ip))
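Assuming PingParameters is a plain (interface, ip) container such as a namedtuple, the generator can be exercised like this:

from collections import namedtuple

PingParameters = namedtuple("PingParameters", ("interface", "ip"))  # assumed shape

to_scan = {"eth0": ["192.168.1.0/30"], "eth1": ["2001:db8::/64"]}
for params in yield_ping_parameters(to_scan):
    print(params)
# PingParameters(interface='eth0', ip='192.168.1.1')
# PingParameters(interface='eth0', ip='192.168.1.2')
# the IPv6 range on eth1 is skipped by the version check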
def allocateLoopback(self, pod, loopbackPrefix, devices): numOfIps = len(devices) + 2 # +2 for network and broadcast numOfBits = int(math.ceil(math.log(numOfIps, 2))) cidr = 32 - numOfBits lo0Block = IPNetwork(loopbackPrefix + "/" + str(cidr)) lo0Ips = list(lo0Block.iter_hosts()) interfaces = [] pod.allocatedLoopbackBlock = str(lo0Block.cidr) for device in devices: ifl = InterfaceLogical('lo0.0', device, str(lo0Ips.pop(0)) + '/32') interfaces.append(ifl) self.dao.createObjects(interfaces)
def scanIPclosed(self, result):
    if result[0]:
        # scan a subnet outside ipv4.address/24
        if result[1] == "address":
            print "[Networkbrowser] got IP:", result[0]
            self.setStatus('update')
            net = IPNetwork('%s/24' % result[0])
            localnet = IPNetwork('%s/%s' % (self._ipv4.address, self._ipv4.netmask))
            if net in localnet:
                self._startScan(self.iface, net.cidr)
            else:
                for host in net.iter_hosts():
                    self._nrthreads += 1
                    reactor.callInThread(self.getNetworkIPs, str(host))
        # add offline host
        elif result[1] == "nfs":
            self.networklist.append(NetworkItemHost(result[0], result[0], ['nfs']))
            write_cache(self.cache_file, self.networklist)
            self.updateNetworkList()
class Controller(object): """Implementation of the clouds controller. This class implements functions to easily work with the available cloud objects. So far, it provides the following functionalities/abstractions: - crediting system (also terminate service when user out of credits) - adding nodes (VMs) - removing nodes (VMs) """ def __init__(self, config_parser, **kwargs): # Params for director callback self.__conpaas_creditUrl = config_parser.get('manager', 'CREDIT_URL') self.__conpaas_terminateUrl = config_parser.get( 'manager', 'TERMINATE_URL') self.__conpaas_service_id = config_parser.get('manager', 'SERVICE_ID') self.__conpaas_user_id = config_parser.get('manager', 'USER_ID') self.__conpaas_app_id = config_parser.get('manager', 'APP_ID') self.__conpaas_caUrl = config_parser.get('manager', 'CA_URL') # Set the CA URL as IPOP's base namespace self.__ipop_base_namespace = self.__conpaas_caUrl if config_parser.has_option('manager', 'IPOP_BASE_IP'): # Application-level network self.__ipop_base_ip = config_parser.get('manager', 'IPOP_BASE_IP') else: self.__ipop_base_ip = None if config_parser.has_option('manager', 'IPOP_NETMASK'): # Application-level netmask self.__ipop_netmask = config_parser.get('manager', 'IPOP_NETMASK') else: self.__ipop_netmask = None if config_parser.has_option('manager', 'IPOP_SUBNET'): # Only import from netaddr if IPOP has to be started from netaddr import IPNetwork # Subnet assigned to this service by the director self.__ipop_subnet = IPNetwork( config_parser.get('manager', 'IPOP_SUBNET')) else: self.__ipop_subnet = None # For crediting system self.__reservation_logger = create_logger('ReservationTimer') self.__reservation_map = { 'manager': ReservationTimer( ['manager'], 55 * 60, # 55mins self.__deduct_and_check_credit, self.__reservation_logger) } self.__reservation_map['manager'].start() self.__force_terminate_lock = Lock() self.config_parser = config_parser self.__created_nodes = [] self.__partially_created_nodes = [] self.__logger = create_logger(__name__) self.__available_clouds = [] self.__default_cloud = None if config_parser.has_option('iaas', 'DRIVER'): self.__default_cloud = iaas.get_cloud_instance( 'iaas', config_parser.get('iaas', 'DRIVER').lower(), config_parser) self.__available_clouds.append(self.__default_cloud) if config_parser.has_option('iaas', 'OTHER_CLOUDS'): self.__available_clouds.extend(iaas.get_clouds(config_parser)) # if there is no default cloud defined in 'iaas' if self.__default_cloud is None: self.__default_cloud = self.__available_clouds.pop(0) # Setting VM role self.role = 'agent' def get_available_ipop_address(self): """Return an unassigned IP address in this manager's VPN subnet""" # Network iterator network = self.__ipop_subnet.iter_hosts() # Currently running hosts running_hosts = [ str(node.ip) for node in self.__created_nodes + self.__partially_created_nodes ] self.__logger.debug("get_available_ipop_address: running nodes: %s" % running_hosts) # The first address is used by IPOP internally network.next() # The second one is taken by manager network.next() for host in network: host = str(host) if host not in running_hosts: self.__logger.debug( "get_available_ipop_address: returning %s" % host) return host #=========================================================================# # create_nodes(self, count, contextFile, test_agent) # #=========================================================================# def create_nodes(self, count, test_agent, port, cloud=None, inst_type=None): """ Creates the VMs associated with the 
list of nodes. It also tests if the agents started correctly. @param count The number of nodes to be created @param test_agent A callback function to test if the agent started correctly in the newly created VM @param port The port on which the agent will listen @param cloud (Optional) If specified, this function will start new nodes inside cloud, otherwise it will start new nodes inside the default cloud or wherever the controller wants (for now only the default cloud is used) @return A list of nodes of type node.ServiceNode """ ready = [] poll = [] iteration = 0 if cloud is None: cloud = self.__default_cloud if not self.deduct_credit(count): raise Exception('Could not add nodes. Not enough credits.') while len(ready) < count: iteration += 1 msg = '[create_nodes] iter %d: creating %d nodes on cloud %s' % ( iteration, count - len(ready), cloud.cloud_name) if inst_type: msg += ' of type %s' % inst_type self.__logger.debug(msg) try: self.__force_terminate_lock.acquire() if iteration == 1: request_start = time.time() service_type = self.config_parser.get('manager', 'TYPE') # eg: conpaas-agent-php-u34-s316 name = "conpaas-%s-%s-u%s-s%s" % (self.role, service_type, self.__conpaas_user_id, self.__conpaas_service_id) if self.__ipop_base_ip and self.__ipop_netmask: # If IPOP has to be used we need to update VMs # contextualization data for each new instance for _ in range(count - len(ready)): vpn_ip = self.get_available_ipop_address() self.update_context({'IPOP_IP_ADDRESS': vpn_ip}, cloud) for newinst in cloud.new_instances(1, name, inst_type): # Set VPN IP newinst.ip = vpn_ip if newinst.private_ip == '': # If private_ip is not set yet, use vpn_ip newinst.private_ip = vpn_ip self.__partially_created_nodes.append(newinst) self.__logger.debug("cloud.new_instances: %s" % poll) else: self.__partially_created_nodes = cloud.new_instances( count - len(ready), name, inst_type) except Exception as e: self.__logger.exception( '[_create_nodes]: Failed to request new nodes') self.delete_nodes(ready) self.__partially_created_nodes = [] raise e finally: self.__force_terminate_lock.release() poll, failed = self.__wait_for_nodes( self.__partially_created_nodes, test_agent, port) ready += poll poll = [] if failed: self.__logger.debug('[_create_nodes]: %d nodes ' 'failed to startup properly: %s' % (len(failed), str(failed))) self.__partially_created_nodes = [] self.delete_nodes(failed) self.__force_terminate_lock.acquire() self.__created_nodes += ready self.__partially_created_nodes = [] self.__force_terminate_lock.release() # start reservation timer with slack of 3 mins + time already wasted # this should be enough time to terminate instances before # hitting the following hour timer = ReservationTimer([i.id for i in ready], (55 * 60) - (time.time() - request_start), self.__deduct_and_check_credit, self.__reservation_logger) timer.start() # set mappings for i in ready: self.__reservation_map[i.id] = timer return ready #=========================================================================# # delete_nodes(self, nodes) # #=========================================================================# def delete_nodes(self, nodes): """Kills the VMs associated with the list of nodes. 
        @param nodes The list of nodes to be removed;
                     - a node must be of type ServiceNode
                       or a class that extends ServiceNode
        """
        for node in nodes:
            cloud = self.get_cloud_by_name(node.cloud_name)
            self.__logger.debug('[delete_nodes]: killing ' + str(node.id))
            try:
                # node may not be in map if it failed to start
                if node.id in self.__reservation_map:
                    timer = self.__reservation_map.pop(node.id)
                    if timer.remove_node(node.id) < 1:
                        timer.stop()
                cloud.kill_instance(node)
            except:
                self.__logger.exception(
                    '[delete_nodes]: Failed to kill node %s', node.id)

    #=========================================================================#
    #                       list_vms(self, cloud=None)                        #
    #=========================================================================#
    def list_vms(self, cloud=None):
        """Returns an array with the VMs running at the given/default cloud.

        @param cloud (Optional) If specified, this method will return the
                     VMs already running at the given cloud
        """
        if cloud is None:
            cloud = self.__default_cloud
        return cloud.list_vms()

    #=========================================================================#
    #           generate_context(self, service_name, replace, cloud)          #
    #=========================================================================#
    def generate_context(self, service_name, cloud=None, ip_whitelist=None):
        """Generates the contextualization file for the default/given cloud.

        @param cloud (Optional) If specified, the context will be generated
                     for it, otherwise it will be generated for all the
                     available clouds

        @param service_name Used to know which config_files and scripts
                            to select
        """
        def __set_cloud_ctx(cloud):
            contxt = self._get_context_file(service_name,
                                            cloud.get_cloud_type())
            cloud.set_context_template(contxt)

        if cloud is None:
            for cloud in self.__available_clouds:
                __set_cloud_ctx(cloud)
        else:
            __set_cloud_ctx(cloud)

    #=========================================================================#
    #                  update_context(self, replace, cloud)                   #
    #=========================================================================#
    def update_context(self, replace={}, cloud=None):
        """Updates the contextualization file for the default/given cloud.

        @param replace A dictionary that specifies which words should be
                       replaced with what. For example:
                       replace = dict(name='A', age='57')
                       context1 = '$name , $age'      => new_context1 = 'A , 57'
                       context2 = '${name}na, ${age}' => new_context2 = 'Ana, 57'

        @param cloud (Optional) If specified, the context will be generated
                     for it, otherwise it will be generated for the
                     default cloud
        """
        if cloud is None:
            cloud = self.__default_cloud
        contxt = cloud.get_context_template()
        contxt = Template(contxt).safe_substitute(replace)
        cloud.config(context=contxt)

    #=========================================================================#
    #                            get_clouds(self)                             #
    #=========================================================================#
    def get_clouds(self):
        """
        @return The list of cloud objects
        """
        return self.__available_clouds

    #=========================================================================#
    #                        get_cloud_by_name(self)                          #
    #=========================================================================#
    def get_cloud_by_name(self, cloud_name):
        """
        @param cloud_name

        @return The cloud object whose name matches @param cloud_name
        """
        try:
            return [
                cloud for cloud in self.__available_clouds
                if cloud.get_cloud_name() == cloud_name
            ][0]
        except IndexError:
            raise Exception("Unknown cloud: %s. 
Available clouds: %s" % (cloud_name, self.__available_clouds)) #=========================================================================# # config_cloud(self, cloud, config_params) # #=========================================================================# def config_cloud(self, cloud, config_params): """Configures some parameters in the given cloud @param cloud The cloud to be configured @param config_params A dictionary containing the configuration parameters (are specific to the cloud) """ cloud.config(config_params) #=========================================================================# # config_clouds(self, config_params) # #=========================================================================# def config_clouds(self, config_params): """Same as config_cloud but for all available clouds @param config_params A dictionary containing the configuration parameters (are specific to the cloud) """ for cloud in self.__available_clouds: cloud.config(config_params) def __check_node(self, node, test_agent, port): """Return True if the given node has properly started an agent on the given port""" if node.ip == '' or node.private_ip == '': return False try: self.__logger.debug('[__check_node]: test_agent(%s, %s)' % (node.ip, port)) test_agent(node.ip, port) return True except socket.error, err: self.__logger.debug('[__check_node]: %s' % err) return False
def get_random_ip(cidr):
    net = IPNetwork(cidr)
    # pick a random usable host address from the network
    return str(random.choice(list(net.iter_hosts())))
class FakeNodesGenerator(object): """This class uses to generate fake nodes""" def __init__(self): self.net1 = IPNetwork(NETWORK_1) self.net1_ip_pool = cycle(self.net1.iter_hosts()) self.net2 = IPNetwork(NETWORK_2) self.net2_ip_pool = cycle(self.net2.iter_hosts()) self.mcounter = dict() self.mac_counter = 0 def _get_network_data(self, net_name): if net_name == 'net1': return str(next(self.net1_ip_pool)), str(self.net1.netmask) if net_name == 'net2': return str(next(self.net2_ip_pool)), str(self.net2.netmask) return None, None def _generate_mac(self): # MAC's starts from FF:FF:FF:FF:FF:FE counting down mac = str( EUI(281474976710654 - self.mac_counter, dialect=mac_unix_expanded)) self.mac_counter += 1 return mac @staticmethod def _get_disk_suffixes(amount): length = 1 counter = 0 while counter < amount: for item in product(string.ascii_lowercase, repeat=length): if counter == amount: break counter += 1 yield ''.join(item) length += 1 def _generate_disks_meta(self, amount): disks = [] total_size = 0 for i, disk_suffix in enumerate(self._get_disk_suffixes(amount)): new_disk = copy.deepcopy(random.choice(DISK_SAMPLES)) # disks total size shouldn't be 0 if i == amount - 1 and total_size == 0: while new_disk['size'] == 0: new_disk = copy.deepcopy(random.choice(DISK_SAMPLES)) new_disk.update({ 'name': 'sd{0}'.format(disk_suffix), 'disk': 'sd{0}'.format(disk_suffix) }) total_size += new_disk['size'] disks.append(new_disk) return disks @staticmethod def _get_random_iface_offloading_modes(): offloading_modes = copy.deepcopy(SAMPLE_INTERFACE_OFFLOADING_MODES) for mode in offloading_modes: mode['state'] = random.choice([True, False, None]) for sub in mode.get('sub', []): sub['state'] = random.choice([True, False, None]) return offloading_modes def _generate_interfaces_meta(self, known_mac, known_ip, known_ip_mask, use_offload_iface, amount): ifaces = [] driver = random.choice(['igb'] * 9 + ['mlx4_en', 'eth_ipoib', 'e1000']) name = random.choice(['eth'] * 8 + ['wlan', 'p2p']) know_interface_num = random.randint(0, amount - 1) for i in six.moves.range(amount): max_speed = random.choice([100, 1000, 56000]) current_speed_set = [ random.randint(max_speed * 0.5, max_speed) for _ in range(3) ] current_speed_set.append(None) new_iface = { 'name': '{0}{1}'.format(name, i), 'driver': driver, 'bus_info': '0000:0{0}:00.0'.format(i), 'max_speed': max_speed, 'current_speed': random.choice(current_speed_set), 'pxe': random.choice([True, False]) } if i == know_interface_num: new_iface.update({ 'mac': known_mac, 'ip': known_ip, 'netmask': known_ip_mask }) else: new_iface['mac'] = self._generate_mac() net = random.choice(['net1', 'net2', None]) if net: ip, netmask = self._get_network_data(net) new_iface.update({'ip': ip, 'netmask': netmask}) if use_offload_iface: new_iface['offloading_modes'] = \ self._get_random_iface_offloading_modes() ifaces.append(new_iface) return ifaces @staticmethod def _generate_systems_meta(hostname, manufacture, platform_name): return { 'manufacturer': manufacture, 'version': '{0}.{0}'.format(random.randint(0, 10), random.randint(0, 9)), 'serial': ''.join([str(random.randint(0, 9)) for _ in six.moves.range(10)]), 'fqdn': '{0}.some-where.net'.format(hostname), 'product': platform_name, 'family': 'To be filled by O.E.M.' 
} @staticmethod def _generate_cpu_meta(kind): real_proc = random.choice([0, 1, 2, 4]) total_proc = real_proc * random.choice([1, 2, 4]) or 1 proc = random.choice(SAMPLE_CPUS[kind]) return { 'real': real_proc, 'total': total_proc, 'spec': [copy.deepcopy(proc) for _ in six.moves.range(total_proc)] } @staticmethod def _generate_memory_meta(amount): max_capacity = 1024**3 * random.choice([8, 16, 32, 64]) total_capacity = 0 devices = [] for _ in six.moves.range(amount): new_memory = copy.deepcopy(random.choice(MEMORY_DEVICE_SAMPLES)) if (total_capacity + new_memory['size']) > max_capacity: if total_capacity == 0: new_memory['size'] = max_capacity else: break total_capacity += new_memory['size'] devices.append(new_memory) return { 'slots': len(devices), 'total': total_capacity, 'maximum_capacity': max_capacity, 'devices': devices } def generate_fake_node(self, pk, is_online=True, is_error=False, use_offload_iface=False, min_ifaces_num=1): """Generate one fake node :param int pk: node's database primary key :param bool is_online: node's online status :param bool is_error: node's error status :param bool use_offload_iface: use offloading_modes data for node's interfaces or not :returns: kwargs dict that represents fake node """ kind = random.choice(['real', 'virtual']) manufacture = random.choice(MANUFACTURERS[kind]) self.mcounter[manufacture] = self.mcounter.get(manufacture, 0) + 1 hostname = 'node-{0}'.format(pk) platform_name = random.choice(['', 'X9SCD', 'N5110', 'X9DRW']) mac = self._generate_mac() net = random.choice(['net1', 'net2']) ip, netmask = self._get_network_data(net) return { 'pk': pk, 'model': 'nailgun.node', 'fields': { 'status': 'error' if is_error else 'discover', 'manufacturer': manufacture, 'name': manufacture + ' {0}({1})'.format( platform_name, self.mcounter.get(manufacture)), 'hostname': hostname, 'ip': ip, 'mac': mac, 'online': is_online, 'labels': {}, 'pending_addition': False, 'pending_deletion': False, 'platform_name': platform_name, 'os_platform': 'ubuntu', 'progress': 0, 'timestamp': '', 'meta': { 'cpu': self._generate_cpu_meta(kind), 'interfaces': self._generate_interfaces_meta( mac, ip, netmask, use_offload_iface, random.randrange(min_ifaces_num, 7)), 'disks': self._generate_disks_meta(random.randint(1, 7)), 'system': self._generate_systems_meta(hostname, manufacture, platform_name), 'memory': self._generate_memory_meta(random.randint(1, 8)) } } } def generate_fake_nodes(self, total_nodes_count, error_nodes_count=None, offline_nodes_count=None, offloading_ifaces_nodes_count=None, min_ifaces_num=1): """Generate list of fake nodes :param int total_nodes_count: total count of nodes to generate :param int error_nodes_count: count of error nodes (optional) :param int offline_nodes_count: count of offline nodes (optional) :param int offloading_ifaces_nodes_count: count of nodes with interface using offloading (optional) :returns: list of dicts, each of which represents node """ if error_nodes_count is None: error_nodes_count = int(0.09 * total_nodes_count) if offline_nodes_count is None: offline_nodes_count = int(0.08 * total_nodes_count) if error_nodes_count + offline_nodes_count > total_nodes_count: error_nodes_count = int(0.09 * total_nodes_count) offline_nodes_count = int(0.08 * total_nodes_count) if offloading_ifaces_nodes_count is None: offloading_ifaces_nodes_count = int(0.2 * total_nodes_count) total_nodes_range = six.moves.range(total_nodes_count) # Making error and offline random sets non intersecting error_nodes_indexes = set( random.sample(total_nodes_range, 
error_nodes_count)) offline_nodes_indexes = set( random.sample( set(total_nodes_range) - error_nodes_indexes, offline_nodes_count)) offloading_ifaces_nodes_indexes = set( random.sample(total_nodes_range, offloading_ifaces_nodes_count)) res = [] for i in total_nodes_range: node = self.generate_fake_node(i + 1, is_online=i not in offline_nodes_indexes, is_error=i in error_nodes_indexes, use_offload_iface=i in offloading_ifaces_nodes_indexes, min_ifaces_num=min_ifaces_num) res.append(node) return res
def add_targets(self, audit_config, dns_resolution = 1): # Validate the arguments. if dns_resolution not in (0, 1, 2): raise ValueError( "Argument 'dns_resolution' can only be 0, 1 or 2," " got %r instead" % dns_resolution) # Remember if subdomains are allowed. include_subdomains = audit_config.include_subdomains # We'll remember here what *new* domains were added, for IP resolution. new_domains = set() # For each user-supplied target string... for target in audit_config.targets: target = to_utf8(target) # If it's an IP address... try: # For IPv6 address if target.startswith("[") and target.endswith("]"): IPAddress(target[1:-1], version=6) address = target[1:-1] else: # IPv4 IPAddress(target) address = target except Exception: ##raise # XXX DEBUG address = None if address is not None: # Keep the IP address. self.__addresses.add(address) # If it's an IP network... else: try: network = IPNetwork(target) except Exception: ##raise # XXX DEBUG network = None if network is not None: # For each host IP address in range... for address in network.iter_hosts(): address = str(address) # Keep the IP address. self.__addresses.add(address) # If it's a domain name... elif self._re_is_domain.match(target): # Convert it to lowercase. target = target.lower() # Is the domain new? if target not in self.__domains: # Keep the domain name. self.__domains.add(target) new_domains.add(target) # If it's an URL... else: try: parsed_url = ParsedURL(target) url = parsed_url.url except Exception: ##raise # XXX DEBUG url = None if url is not None: # Keep the URL. self.__web_pages.add(url) #ADD By BlackYe self.__target_url.add(url) # If we allow parent folders... if audit_config.allow_parent: # Add the base URL too. self.__web_pages.add(parsed_url.base_url) # Extract the domain or IP address. host = parsed_url.host try: if host.startswith("[") and host.endswith("]"): IPAddress(host[1:-1], version=6) host = host[1:-1] else: IPAddress(host) self.__addresses.add(host) except Exception: ##raise # XXX DEBUG host = host.lower() if host not in self.__domains: self.__domains.add(host) new_domains.add(host) # If it's none of the above, fail. else: raise ValueError( "I don't know what to do with this: %s" % target) # If subdomains are allowed, we must include the parent domains. if include_subdomains: for hostname in new_domains.copy(): subdomain, domain, suffix = split_hostname(hostname) if subdomain: prefix = ".".join( (domain, suffix) ) for part in reversed(subdomain.split(".")): if prefix not in self.__roots and \ prefix not in self.__domains: new_domains.add(prefix) self.__domains.add(prefix) self.__roots.add(prefix) prefix = ".".join( (part, prefix) ) else: self.__roots.add(hostname) # Resolve each (new?) domain name and add the IP addresses as targets. if dns_resolution: if dns_resolution == 1: domains_to_resolve = new_domains else: domains_to_resolve = self.__domains for domain in domains_to_resolve: try: resolved = set( entry[4][0] for entry in getaddrinfo(domain, 80) if entry[0] in (AF_INET, AF_INET6) ) if resolved: self.__addresses.update(resolved) else: msg = "Cannot resolve domain name: %s" % domain warn(msg, RuntimeWarning) except Exception: msg = "Cannot resolve domain name: %s" % domain Logger.log_error("GoLismero:%s" % msg)
def add_targets(self, audit_config, dns_resolution = 1): # Validate the arguments. if dns_resolution not in (0, 1, 2): raise ValueError( "Argument 'dns_resolution' can only be 0, 1 or 2," " got %r instead" % dns_resolution) # Remember if subdomains are allowed. include_subdomains = audit_config.include_subdomains # We'll remember here what *new* domains were added, for IP resolution. new_domains = set() # For each user-supplied target string... for target in audit_config.targets: target = to_utf8(target) # If it's an IP address... try: # For IPv6 address if target.startswith("[") and target.endswith("]"): IPAddress(target[1:-1], version=6) address = target[1:-1] else: # IPv4 IPAddress(target) address = target except Exception: ##raise # XXX DEBUG address = None if address is not None: # Keep the IP address. self.__addresses.add(address) # If it's an IP network... else: try: network = IPNetwork(target) except Exception: ##raise # XXX DEBUG network = None if network is not None: # For each host IP address in range... for address in network.iter_hosts(): address = str(address) # Keep the IP address. self.__addresses.add(address) # If it's a domain name... elif self._re_is_domain.match(target): # Convert it to lowercase. target = target.lower() # Is the domain new? if target not in self.__domains: # Keep the domain name. self.__domains.add(target) new_domains.add(target) # If it's an URL... else: try: parsed_url = ParsedURL(target) url = parsed_url.url except Exception: ##raise # XXX DEBUG url = None if url is not None: # Keep the URL. self.__web_pages.add(url) # If we allow parent folders... if audit_config.allow_parent: # Add the base URL too. self.__web_pages.add(parsed_url.base_url) # Extract the domain or IP address. host = parsed_url.host try: if host.startswith("[") and host.endswith("]"): IPAddress(host[1:-1], version=6) host = host[1:-1] else: IPAddress(host) self.__addresses.add(host) except Exception: ##raise # XXX DEBUG host = host.lower() if host not in self.__domains: self.__domains.add(host) new_domains.add(host) # If it's none of the above, fail. else: raise ValueError( "I don't know what to do with this: %s" % target) # If subdomains are allowed, we must include the parent domains. if include_subdomains: for hostname in new_domains.copy(): subdomain, domain, suffix = split_hostname(hostname) if subdomain: prefix = ".".join( (domain, suffix) ) for part in reversed(subdomain.split(".")): if prefix not in self.__roots and \ prefix not in self.__domains: new_domains.add(prefix) self.__domains.add(prefix) self.__roots.add(prefix) prefix = ".".join( (part, prefix) ) else: self.__roots.add(hostname) # Resolve each (new?) domain name and add the IP addresses as targets. if dns_resolution: if dns_resolution == 1: domains_to_resolve = new_domains else: domains_to_resolve = self.__domains for domain in domains_to_resolve: # Resolve the IPv4 addresses. resolved_4 = DNS.get_a(domain) for register in resolved_4: self.__addresses.add(register.address) # Resolve the IPv6 addresses. resolved_6 = DNS.get_aaaa(domain) for register in resolved_6: self.__addresses.add(register.address) # Warn when a domain cannot be resolved. if not resolved_4 and not resolved_6: msg = "Cannot resolve domain name: %s" % domain warn(msg, RuntimeWarning)
def __init__(self, audit_config): """ :param audit_config: Audit configuration. :type audit_config: AuditConfig """ # This is where we'll keep the parsed targets. self.__domains = set() # Domain names. self.__addresses = set() # IP addresses. self.__web_pages = set() # URLs. # Remember if subdomains are allowed. self.__include_subdomains = audit_config.include_subdomains # For each user-supplied target string... for target in audit_config.targets: # If it's a domain name... if self._re_is_domain.match(target): # Convert it to lowercase. target = target.lower() # Keep the domain name. self.__domains.add(target) # Guess an URL from it. # FIXME: this should be smarter and use port scanning! self.__web_pages.add("http://%s/" % target) # If it's an IP address... else: try: if target.startswith("[") and target.endswith("]"): IPAddress(target[1:-1], version=6) address = target[1:-1] else: IPAddress(target) address = target except Exception: address = None if address is not None: # Keep the IP address. self.__addresses.add(address) # Guess an URL from it. # FIXME: this should be smarter and use port scanning! self.__web_pages.add("http://%s/" % address) # If it's an IP network... else: try: network = IPNetwork(target) except Exception: network = None if network is not None: # For each host IP address in range... for address in network.iter_hosts(): address = str(address) # Keep the IP address. self.__addresses.add(address) # Guess an URL from it. # FIXME: this should be smarter and use port scanning! self.__web_pages.add("http://%s/" % address) # If it's an URL... else: try: parsed_url = ParsedURL(target) url = parsed_url.url except Exception: url = None if url is not None: # Keep the URL. self.__web_pages.add(url) # Extract the domain or IP address. host = parsed_url.host try: if host.startswith("[") and host.endswith("]"): IPAddress(host[1:-1], version=6) host = host[1:-1] else: IPAddress(host) self.__addresses.add(host) except Exception: self.__domains.add(host.lower()) # If subdomains are allowed, we must include the parent domains. if self.__include_subdomains: for hostname in self.__domains.copy(): subdomain, domain, suffix = split_hostname(hostname) if subdomain: prefix = ".".join((domain, suffix)) for part in reversed(subdomain.split(".")): self.__domains.add(prefix) prefix = ".".join((part, prefix)) # Resolve each domain name. dns_registers = [] for domain in self.__domains: # Resolve the IPv4 addresses. dns_registers.extend(DNS.get_a(domain)) # Resolve the IPv6 addresses. dns_registers.extend(DNS.get_aaaa(domain)) # If no IP addresses could be resolved, abort the audit. if self.__domains and not dns_registers: raise RuntimeError("No IP addresses could be resolved from" " the target domains, aborting audit!") # Add the addresses to the set of target addresses. for register in dns_registers: self.__addresses.add(register.address)
def handleEvent(self, event): eventName = event.eventType srcModuleName = event.module eventData = event.data self.debug(f"Received event, {eventName}, from {srcModuleName}") if self.errorState: return if srcModuleName == "sfp_tool_testsslsh": self.debug("Skipping event from myself.") return if not self.opts['testsslsh_path']: self.error( "You enabled sfp_tool_testsslsh but did not set a path to the tool!" ) self.errorState = True return exe = self.opts['testsslsh_path'] if self.opts['testsslsh_path'].endswith('/'): exe = f"{exe}testssl.sh" if not os.path.isfile(exe): self.error(f"File does not exist: {exe}") self.errorState = True return if not SpiderFootHelpers.sanitiseInput(eventData, extra=['/']): self.debug("Invalid input, skipping.") return targets = list() try: if eventName == "NETBLOCK_OWNER" and self.opts['netblockscan']: net = IPNetwork(eventData) if net.prefixlen < self.opts['netblockscanmax']: self.debug("Skipping scanning of " + eventData + ", too big.") return for addr in net.iter_hosts(): targets.append(str(addr)) except BaseException as e: self.error( f"Strange netblock identified, unable to parse: {eventData} ({e})" ) return # Don't look up stuff twice, check IP == IP here if eventData in self.results: self.debug(f"Skipping {eventData} as already scanned.") return else: if eventName != "INTERNET_NAME": # Might be a subnet within a subnet or IP within a subnet for addr in self.results: try: if IPNetwork(eventData) in IPNetwork(addr): self.debug( f"Skipping {eventData} as already within a scanned range." ) return except BaseException: # self.results will also contain hostnames continue self.results[eventData] = True # If we weren't passed a netblock, this will be empty if not targets: targets.append(eventData) for target in targets: # Create a temporary output file _, fname = tempfile.mkstemp("testssl.json") args = [ exe, "-U", "--connect-timeout", "5", "--openssl-timeout", "5", "--jsonfile", fname, target ] try: p = Popen(args, stdout=PIPE, stderr=PIPE) out, stderr = p.communicate(input=None) stdout = out.decode(sys.stdin.encoding) except Exception as e: self.error(f"Unable to run testssl.sh: {e}") os.unlink(fname) continue if p.returncode != 0: err = None if "Unable to open a socket" in stdout: err = "Unable to connect" else: err = "Internal error" self.error( f"Unable to read testssl.sh output for {target}: {err}") os.unlink(fname) continue if not stdout: self.debug(f"testssl.sh returned no output for {target}") os.unlink(fname) continue try: with open(fname, "r") as f: result_json = json.loads(f.read()) os.unlink(fname) except Exception as e: self.error( f"Could not parse testssl.sh output as JSON: {e}\nstderr: {stderr}\nstdout: {stdout}" ) continue if not result_json: self.debug(f"testssl.sh returned no output for {target}") continue for result in result_json: if result['finding'] == "not vulnerable": continue if result['severity'] not in [ "LOW", "MEDIUM", "HIGH", "CRITICAL" ]: continue if 'cve' in result: for cve in result['cve'].split(" "): etype, cvetext = self.sf.cveInfo(cve) evt = SpiderFootEvent(etype, cvetext, self.__name__, event) self.notifyListeners(evt) else: evt = SpiderFootEvent( "VULNERABILITY_GENERAL", f"{result['id']} ({result['finding']})", self.__name__, event) self.notifyListeners(evt)
class FakeNodesGenerator: """This class uses to generate fake nodes """ def __init__(self): self.net1 = IPNetwork(NETWORK_1) self.net1_ip_pool = cycle(self.net1.iter_hosts()) self.net2 = IPNetwork(NETWORK_2) self.net2_ip_pool = cycle(self.net2.iter_hosts()) self.mcounter = dict() self.mac_counter = 0 def _get_network_data(self, net_name): if net_name == 'net1': return str(next(self.net1_ip_pool)), str(self.net1.netmask) if net_name == 'net2': return str(next(self.net2_ip_pool)), str(self.net2.netmask) return None, None def _generate_mac(self): # MAC's starts from FF:FF:FF:FF:FF:FE counting down mac = str(EUI(281474976710654 - self.mac_counter, dialect=mac_unix_expanded)) self.mac_counter += 1 return mac @staticmethod def _get_disk_suffixes(amount): length = 1 counter = 0 while counter < amount: for item in product(string.ascii_lowercase, repeat=length): if counter == amount: break counter += 1 yield ''.join(item) length += 1 def _generate_disks_meta(self, amount): disks = [] total_size = 0 for i, disk_suffix in enumerate(self._get_disk_suffixes(amount)): new_disk = copy.deepcopy(random.choice(DISK_SAMPLES)) # disks total size shouldn't be 0 if i == amount - 1 and total_size == 0: while new_disk['size'] == 0: new_disk = copy.deepcopy(random.choice(DISK_SAMPLES)) new_disk.update({ 'name': 'sd{0}'.format(disk_suffix), 'disk': 'sd{0}'.format(disk_suffix) }) total_size += new_disk['size'] disks.append(new_disk) return disks @staticmethod def _get_random_iface_offloading_modes(): offloading_modes = copy.deepcopy(SAMPLE_INTERFACE_OFFLOADING_MODES) for mode in offloading_modes: mode['state'] = random.choice([True, False, None]) for sub in mode.get('sub', []): sub['state'] = random.choice([True, False, None]) return offloading_modes def _generate_interfaces_meta(self, known_mac, known_ip, known_ip_mask, use_offload_iface, amount): ifaces = [] driver = random.choice(['igb'] * 9 + ['mlx4_en', 'eth_ipoib', 'e1000']) name = random.choice(['eth'] * 8 + ['wlan', 'p2p']) know_interface_num = random.randint(0, amount - 1) for i in six.moves.range(amount): max_speed = random.choice([100, 1000, 56000]) current_speed_set = [ random.randint(max_speed * 0.5, max_speed) for _ in range(3)] current_speed_set.append(None) new_iface = { 'name': '{0}{1}'.format(name, i), 'driver': driver, 'bus_info': '0000:0{0}:00.0'.format(i), 'max_speed': max_speed, 'current_speed': random.choice(current_speed_set), 'pxe': random.choice([True, False]) } if i == know_interface_num: new_iface.update({ 'mac': known_mac, 'ip': known_ip, 'netmask': known_ip_mask }) else: new_iface['mac'] = self._generate_mac() net = random.choice(['net1', 'net2', None]) if net: ip, netmask = self._get_network_data(net) new_iface.update({ 'ip': ip, 'netmask': netmask }) if use_offload_iface: new_iface['offloading_modes'] = \ self._get_random_iface_offloading_modes() ifaces.append(new_iface) return ifaces @staticmethod def _generate_systems_meta(hostname, manufacture, platform_name): return { 'manufacturer': manufacture, 'version': '{0}.{0}'.format(random.randint(0, 10), random.randint(0, 9)), 'serial': ''.join( [str(random.randint(0, 9)) for _ in six.moves.range(10)]), 'fqdn': '{0}.mirantis.net'.format(hostname), 'product': platform_name, 'family': 'To be filled by O.E.M.' 
        }

    @staticmethod
    def _generate_cpu_meta(kind):
        real_proc = random.choice([0, 1, 2, 4])
        total_proc = real_proc * random.choice([1, 2, 4]) or 1
        proc = random.choice(SAMPLE_CPUS[kind])
        return {
            'real': real_proc,
            'total': total_proc,
            'spec': [copy.deepcopy(proc) for _ in six.moves.range(total_proc)]
        }

    @staticmethod
    def _generate_memory_meta(amount):
        max_capacity = 1024 ** 3 * random.choice([8, 16, 32, 64])
        total_capacity = 0
        devices = []
        for _ in six.moves.range(amount):
            new_memory = copy.deepcopy(
                random.choice(MEMORY_DEVICE_SAMPLES))
            if (total_capacity + new_memory['size']) > max_capacity:
                if total_capacity == 0:
                    new_memory['size'] = max_capacity
                else:
                    break
            total_capacity += new_memory['size']
            devices.append(new_memory)
        return {
            'slots': len(devices),
            'total': total_capacity,
            'maximum_capacity': max_capacity,
            'devices': devices
        }

    def generate_fake_node(self, pk, is_online=True, is_error=False,
                           use_offload_iface=False, min_ifaces_num=1):
        """Generate one fake node

        :param int pk: node's database primary key
        :param bool is_online: node's online status
        :param bool is_error: node's error status
        :param bool use_offload_iface: use offloading_modes data for node's
            interfaces or not
        :param int min_ifaces_num: minimum number of interfaces to generate
        :returns: kwargs dict that represents fake node
        """
        kind = random.choice(['real', 'virtual'])
        manufacture = random.choice(MANUFACTURERS[kind])
        self.mcounter[manufacture] = self.mcounter.get(manufacture, 0) + 1
        hostname = 'node-{0}'.format(pk)
        platform_name = random.choice(['', 'X9SCD', 'N5110', 'X9DRW'])
        mac = self._generate_mac()
        net = random.choice(['net1', 'net2'])
        ip, netmask = self._get_network_data(net)
        return {
            'pk': pk,
            'model': 'nailgun.node',
            'fields': {
                'status': 'error' if is_error else 'discover',
                'manufacturer': manufacture,
                'name': manufacture + ' {0}({1})'.format(
                    platform_name, self.mcounter.get(manufacture)),
                'hostname': hostname,
                'ip': ip,
                'mac': mac,
                'online': is_online,
                'labels': {},
                'pending_addition': False,
                'pending_deletion': False,
                'platform_name': platform_name,
                'os_platform': 'ubuntu',
                'progress': 0,
                'timestamp': '',
                'meta': {
                    'cpu': self._generate_cpu_meta(kind),
                    'interfaces': self._generate_interfaces_meta(
                        mac, ip, netmask, use_offload_iface,
                        random.randrange(min_ifaces_num, 7)),
                    'disks': self._generate_disks_meta(random.randint(1, 7)),
                    'system': self._generate_systems_meta(
                        hostname, manufacture, platform_name),
                    'memory': self._generate_memory_meta(random.randint(1, 8))
                }
            }
        }

    def generate_fake_nodes(self, total_nodes_count, error_nodes_count=None,
                            offline_nodes_count=None,
                            offloading_ifaces_nodes_count=None,
                            min_ifaces_num=1):
        """Generate list of fake nodes

        :param int total_nodes_count: total count of nodes to generate
        :param int error_nodes_count: count of error nodes (optional)
        :param int offline_nodes_count: count of offline nodes (optional)
        :param int offloading_ifaces_nodes_count: count of nodes with
            interfaces using offloading (optional)
        :param int min_ifaces_num: minimum number of interfaces per node
        :returns: list of dicts, each of which represents node
        """
        if error_nodes_count is None:
            error_nodes_count = int(0.09 * total_nodes_count)
        if offline_nodes_count is None:
            offline_nodes_count = int(0.08 * total_nodes_count)
        if error_nodes_count + offline_nodes_count > total_nodes_count:
            # fall back to the default fractions when the user-supplied
            # counts cannot fit into the total
            error_nodes_count = int(0.09 * total_nodes_count)
            offline_nodes_count = int(0.08 * total_nodes_count)
        if offloading_ifaces_nodes_count is None:
            offloading_ifaces_nodes_count = int(0.2 * total_nodes_count)

        total_nodes_range = six.moves.range(total_nodes_count)
        # Make the random error and offline sets non-intersecting
        error_nodes_indexes = set(random.sample(
            total_nodes_range, error_nodes_count))
        offline_nodes_indexes = set(random.sample(
            set(total_nodes_range) - error_nodes_indexes,
            offline_nodes_count
        ))
        offloading_ifaces_nodes_indexes = set(random.sample(
            total_nodes_range, offloading_ifaces_nodes_count))

        res = []
        for i in total_nodes_range:
            node = self.generate_fake_node(
                i + 1,
                is_online=i not in offline_nodes_indexes,
                is_error=i in error_nodes_indexes,
                use_offload_iface=i in offloading_ifaces_nodes_indexes,
                min_ifaces_num=min_ifaces_num
            )
            res.append(node)
        return res
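# A minimal smoke test for the generator above — a sketch, assuming the
# module-level sample constants (NETWORK_1, NETWORK_2, DISK_SAMPLES,
# SAMPLE_CPUS, MEMORY_DEVICE_SAMPLES, MANUFACTURERS,
# SAMPLE_INTERFACE_OFFLOADING_MODES) are defined as in the surrounding
# fixtures module; the test name is illustrative only.
def test_generate_fake_nodes_smoke():
    generator = FakeNodesGenerator()
    nodes = generator.generate_fake_nodes(10, error_nodes_count=1,
                                          offline_nodes_count=2)
    assert len(nodes) == 10
    # MACs come from a single decrementing counter, so every node's MAC
    # is unique
    assert len({node['fields']['mac'] for node in nodes}) == 10
    assert sum(1 for n in nodes if not n['fields']['online']) == 2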
def add_targets(self, audit_config, dns_resolution=1):
    """
    :param audit_config: Audit configuration.
    :param dns_resolution: DNS resolution mode: 0 = no resolution,
        1 = resolve only the newly added domains, 2 = resolve all
        known domains.
    """

    # Validate the arguments.
    if dns_resolution not in (0, 1, 2):
        raise ValueError(
            "Argument 'dns_resolution' can only be 0, 1 or 2,"
            " got %r instead" % dns_resolution)

    # Remember if subdomains are allowed.
    include_subdomains = audit_config.include_subdomains

    # We'll remember here what *new* domains were added, for IP resolution.
    new_domains = set()

    # For each user-supplied target string...
    for target in audit_config.targets:

        # If it's an IP address...
        try:
            if target.startswith("[") and target.endswith("]"):
                IPAddress(target[1:-1], version=6)
                address = target[1:-1]
            else:
                IPAddress(target)
                address = target
        except Exception:
            address = None
        if address is not None:

            # Keep the IP address.
            self.__addresses.add(address)

        # If it's an IP network...
        else:
            try:
                network = IPNetwork(target)
            except Exception:
                network = None
            if network is not None:

                # For each host IP address in range...
                for address in network.iter_hosts():
                    address = str(address)

                    # Keep the IP address.
                    self.__addresses.add(address)

            # If it's a domain name...
            elif self._re_is_domain.match(target):

                # Convert it to lowercase.
                target = target.lower()

                # Is the domain new?
                if target not in self.__domains:

                    # Keep the domain name.
                    self.__domains.add(target)
                    new_domains.add(target)

            # If it's a URL...
            else:
                try:
                    parsed_url = ParsedURL(target)
                    url = parsed_url.url
                except Exception:
                    url = None
                if url is not None:

                    # Keep the URL.
                    self.__web_pages.add(url)

                    # Extract the domain or IP address.
                    host = parsed_url.host
                    try:
                        if host.startswith("[") and host.endswith("]"):
                            IPAddress(host[1:-1], version=6)
                            host = host[1:-1]
                        else:
                            IPAddress(host)
                        self.__addresses.add(host)
                    except Exception:
                        host = host.lower()
                        if host not in self.__domains:
                            self.__domains.add(host)
                            new_domains.add(host)

                # If it's none of the above, fail.
                else:
                    raise ValueError(
                        "I don't know what to do with this: %s" % target)

    # If subdomains are allowed, we must include the parent domains.
    if include_subdomains:
        for hostname in new_domains.copy():
            subdomain, domain, suffix = split_hostname(hostname)
            if subdomain:
                prefix = ".".join((domain, suffix))
                for part in reversed(subdomain.split(".")):
                    if prefix not in self.__roots and \
                            prefix not in self.__domains:
                        new_domains.add(prefix)
                        self.__domains.add(prefix)
                        self.__roots.add(prefix)
                    prefix = ".".join((part, prefix))
            else:
                self.__roots.add(hostname)

    # Resolve each (new?) domain name and add the IP addresses as targets.
    if dns_resolution:
        if dns_resolution == 1:
            domains_to_resolve = new_domains
        else:
            domains_to_resolve = self.__domains
        for domain in domains_to_resolve:

            # Resolve the IPv4 addresses.
            resolved_4 = DNS.get_a(domain)
            for register in resolved_4:
                self.__addresses.add(register.address)

            # Resolve the IPv6 addresses.
            resolved_6 = DNS.get_aaaa(domain)
            for register in resolved_6:
                self.__addresses.add(register.address)

            # Warn when a domain cannot be resolved.
            if not resolved_4 and not resolved_6:
                msg = "Cannot resolve domain name: %s" % domain
                warn(msg, RuntimeWarning)
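# Standalone sketch of the bracket-stripping idiom used above to accept
# literal IPv6 targets such as "[::1]"; the helper name is illustrative.
from netaddr import IPAddress

def parse_ip_literal(target):
    """Return the bare address string, or None if target is not an IP literal."""
    try:
        if target.startswith("[") and target.endswith("]"):
            IPAddress(target[1:-1], version=6)  # validate as IPv6
            return target[1:-1]
        IPAddress(target)                       # validate as IPv4 or IPv6
        return target
    except Exception:
        return None

assert parse_ip_literal("[::1]") == "::1"
assert parse_ip_literal("10.0.0.1") == "10.0.0.1"
assert parse_ip_literal("example.com") is None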
def exposed_weave_ip(self):
    pool = IPNetwork(self.weave_ip_network)
    # pick the last usable host address in the pool: iter_hosts() skips the
    # network and broadcast addresses, so its final element sits at index
    # pool.size - 3 (equivalent to IPAddress(pool.last - 1))
    addr = list(itertools.islice(pool.iter_hosts(),
                                 pool.size - 3, pool.size))[0]
    return str(addr), pool.prefixlen
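# Quick check of the islice arithmetic above on a sample /24: the call
# returns the last usable host, the same address as IPAddress(pool.last - 1).
from itertools import islice
from netaddr import IPAddress, IPNetwork

pool = IPNetwork('10.32.0.0/24')
addr = list(islice(pool.iter_hosts(), pool.size - 3, pool.size))[0]
assert addr == IPAddress('10.32.0.254') == IPAddress(pool.last - 1)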
from typing import Tuple

def _allocate_router_and_container_ips(
        pool: netaddr.IPNetwork) -> Tuple[netaddr.IPAddress, netaddr.IPAddress]:
    hosts = pool.iter_hosts()
    next(hosts)  # skip the Docker default gateway (first usable host)
    return next(hosts), next(hosts)
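# Example of the helper above on a sample /29 pool, assuming the Docker
# default gateway occupies the first usable host as the comment says.
import netaddr

router, container = _allocate_router_and_container_ips(
    netaddr.IPNetwork('172.18.0.0/29'))
assert str(router) == '172.18.0.2' and str(container) == '172.18.0.3'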
class Network:
    json_file = File('ipnetworks')
    json_temp = {}

    def __init__(self, cidr, **kwargs):
        self.ipnetwork = IPNetwork(cidr)
        self.cidr = self.ipnetwork.cidr.__str__()
        if cidr != self.cidr:
            raise AnsibleError(
                "Invalid IP network: %s is an IP address belonging to %s" %
                (cidr, self.cidr))
        elif self.ipnetwork.prefixlen > 30:
            raise AnsibleError(
                "Network prefix length > 30 is not supported: %s" % self.cidr)

        self.ipset = IPSet(self.ipnetwork)
        self.existing_ipset = IPSet()
        self.exclude_ipaddrs = [
            self.ipnetwork.network, self.ipnetwork.broadcast
        ]
        self.attrs = {
            'parent': None,
            'is_parent': False,
            'state': None,
        }

        self.json_file.update(self.data(cidr=self.cidr, **self.attrs))

    @property
    def subnets(self):
        return (network for network, attrs in self.json_file.items()
                if attrs['parent'] == self.cidr)

    @property
    def ipaddrs(self):
        return (network for network, attrs in self.json_file.items()
                if attrs['parent'] == self.cidr and attrs['is_ipaddr'])

    def data(self, **kwargs):
        key = kwargs['cidr']
        attrs = {
            'description': kwargs.get('description', None),
            'parent': kwargs.get('parent', None),
            'is_parent': kwargs.get('is_parent', False),
            'is_ipaddr': kwargs.get('is_ipaddr', False),
            'is_subnet': kwargs.get('is_subnet', False),
            'prefixlen': kwargs.get('prefixlen', None),
            'prefix': kwargs.get('prefix', None),
            'address': kwargs.get('address', None),
            'usable_ips': kwargs.get('usable_ips', None),
            'iprange': kwargs.get('iprange', None),
            'state': kwargs.get('state', None),
        }
        return {key: attrs}

    def iter_subnets(self, prefixlen, random=True):
        if self.attrs['iprange'] is not None:
            raise AnsibleError(
                'Network %s has already been assigned IP addresses' %
                self.cidr)
        subnets = [
            s for c in (self.ipset - self.existing_ipset).iter_cidrs()
            for s in c.subnet(prefixlen)
        ]
        if random:
            shuffle(subnets)
        self.attrs['is_parent'] = True
        for subnet in subnets:
            # self.existing_ipset.add(subnet)
            yield str(subnet)

    def iter_ipaddrs(self, prefix_type=None, random=True, exclude_list=None):
        if self.attrs['iprange'] is None and self.attrs['is_parent']:
            raise AnsibleError(
                "Network %s already has subnets assigned" % self.cidr)
        notation = '/%s' % self.ipnetwork.prefixlen
        if prefix_type == 'host':
            notation = '/32'
        elif prefix_type == 'addr':
            notation = ''
        ipaddrs = [
            i for i in IPSet(self.ipnetwork.iter_hosts()) - self.existing_ipset
        ]
        if random:
            shuffle(ipaddrs)
        self.attrs.update({
            'iprange': '{}-{}'.format(*self.exclude_ipaddrs),
            'usable_ips': len(self.ipset) - 2,
            'state': 'allocated',
        })
        for ipaddr in ipaddrs:
            _ipaddr = "%s%s" % (ipaddr, notation)
            self.json_temp.update(
                self.data(cidr=IPNetwork(ipaddr).__str__(),
                          parent=self.cidr,
                          is_ipaddr=True,
                          prefixlen=self.ipnetwork.prefixlen,
                          prefix='%s/%s' % (ipaddr.__str__(),
                                            self.ipnetwork.prefixlen),
                          address=ipaddr.__str__()))
            self.existing_ipset.add(ipaddr)
            yield _ipaddr

    def get_ipaddr(self, index):
        # NOTE: despite its name this is a generator; it hands out free
        # addresses from the top of the network downwards and `index`
        # is currently unused.
        if self.attrs['iprange'] is None and self.attrs['is_parent']:
            raise AnsibleError(
                "Network %s already has subnets assigned" % self.cidr)
        notation = '/%s' % self.ipnetwork.prefixlen
        ipaddrs = [
            i for i in IPSet(self.ipnetwork.iter_hosts()) - self.existing_ipset
        ]
        self.attrs.update({
            'iprange': '{}-{}'.format(*self.exclude_ipaddrs),
            'usable_ips': len(self.ipset) - 2
        })
        ipaddrs.reverse()
        for ipaddr in ipaddrs:
            _ipaddr = "%s%s" % (ipaddr, notation)
            self.json_temp.update(
                self.data(cidr=IPNetwork(ipaddr).__str__(),
                          parent=self.cidr,
                          is_ipaddr=True,
                          prefixlen=self.ipnetwork.prefixlen,
                          prefix='%s/%s' % (ipaddr.__str__(),
                                            self.ipnetwork.prefixlen),
                          address=ipaddr.__str__()))
            self.existing_ipset.add(ipaddr)
            yield _ipaddr

    def remove(self):
        subnets = {
            k for k, v in self.json_file.items() if v['parent'] == self.cidr
        }
        for subnet in subnets:
            del self.json_file[subnet]
        del self.json_file[self.cidr]

    def _host_addr(self, addr):
        return re.sub(r'/\d+', '/32', addr)

    def remove_ipaddr(self, addr):
        del self.json_file[self._host_addr(addr)]

    def save(self):
        self.json_file.update(self.json_temp)
        self.json_temp = {}
        for ipnetwork, attrs in self.json_file.copy().items():
            if attrs['state'] == 'merged':
                del self.json_file[ipnetwork]
        self.json_file.save()
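# The subnet-carving idiom behind Network.iter_subnets, shown standalone:
# subtract already-used space from the pool, then split what remains into
# equal-sized subnets. Addresses here are arbitrary sample values.
from netaddr import IPNetwork, IPSet

pool = IPSet([IPNetwork('10.0.0.0/24')])
used = IPSet([IPNetwork('10.0.0.0/26')])
free_26s = [s for c in (pool - used).iter_cidrs() for s in c.subnet(26)]
assert [str(s) for s in free_26s] == [
    '10.0.0.64/26', '10.0.0.128/26', '10.0.0.192/26']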
def __init__(self, audit_config):
    """
    :param audit_config: Audit configuration.
    :type audit_config: AuditConfig
    """

    # This is where we'll keep the parsed targets.
    self.__domains = set()     # Domain names.
    self.__addresses = set()   # IP addresses.
    self.__web_pages = set()   # URLs.

    # Remember if subdomains are allowed.
    self.__include_subdomains = audit_config.include_subdomains

    # For each user-supplied target string...
    for target in audit_config.targets:

        # If it's a domain name...
        if self._re_is_domain.match(target):

            # Convert it to lowercase.
            target = target.lower()

            # Keep the domain name.
            self.__domains.add(target)

            # Guess a URL from it.
            # FIXME: this should be smarter and use port scanning!
            self.__web_pages.add("http://%s/" % target)

        # If it's an IP address...
        else:
            try:
                if target.startswith("[") and target.endswith("]"):
                    IPAddress(target[1:-1], version=6)
                    address = target[1:-1]
                else:
                    IPAddress(target)
                    address = target
            except Exception:
                address = None
            if address is not None:

                # Keep the IP address.
                self.__addresses.add(address)

                # Guess a URL from it.
                # FIXME: this should be smarter and use port scanning!
                self.__web_pages.add("http://%s/" % address)

            # If it's an IP network...
            else:
                try:
                    network = IPNetwork(target)
                except Exception:
                    network = None
                if network is not None:

                    # For each host IP address in range...
                    for address in network.iter_hosts():
                        address = str(address)

                        # Keep the IP address.
                        self.__addresses.add(address)

                        # Guess a URL from it.
                        # FIXME: this should be smarter and use port scanning!
                        self.__web_pages.add("http://%s/" % address)

                # If it's a URL...
                else:
                    try:
                        parsed_url = ParsedURL(target)
                        url = parsed_url.url
                    except Exception:
                        url = None
                    if url is not None:

                        # Keep the URL.
                        self.__web_pages.add(url)

                        # Extract the domain or IP address.
                        host = parsed_url.host
                        try:
                            if host.startswith("[") and host.endswith("]"):
                                IPAddress(host[1:-1], version=6)
                                host = host[1:-1]
                            else:
                                IPAddress(host)
                            self.__addresses.add(host)
                        except Exception:
                            self.__domains.add(host.lower())

    # If subdomains are allowed, we must include the parent domains.
    if self.__include_subdomains:
        for hostname in self.__domains.copy():
            subdomain, domain, suffix = split_hostname(hostname)
            if subdomain:
                prefix = ".".join((domain, suffix))
                for part in reversed(subdomain.split(".")):
                    self.__domains.add(prefix)
                    prefix = ".".join((part, prefix))

    # Resolve each domain name.
    dns_registers = []
    for domain in self.__domains:

        # Resolve the IPv4 addresses.
        dns_registers.extend(DNS.get_a(domain))

        # Resolve the IPv6 addresses.
        dns_registers.extend(DNS.get_aaaa(domain))

    # If no IP addresses could be resolved, abort the audit.
    if self.__domains and not dns_registers:
        raise RuntimeError(
            "No IP addresses could be resolved from"
            " the target domains, aborting audit!")

    # Add the addresses to the set of target addresses.
    for register in dns_registers:
        self.__addresses.add(register.address)
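# Standalone sketch of the parent-domain expansion above. The split function
# passed in is a deliberately naive stand-in that mirrors split_hostname's
# (subdomain, domain, suffix) contract; it is illustrative only and does not
# handle multi-label suffixes such as "co.uk".
def expand_parent_domains(hostname, split_hostname):
    subdomain, domain, suffix = split_hostname(hostname)
    domains = set()
    if subdomain:
        prefix = ".".join((domain, suffix))
        for part in reversed(subdomain.split(".")):
            domains.add(prefix)
            prefix = ".".join((part, prefix))
    return domains

naive_split = lambda h: (".".join(h.split(".")[:-2]),
                         h.split(".")[-2], h.split(".")[-1])
assert expand_parent_domains("a.b.example.com", naive_split) == \
    {"example.com", "b.example.com"}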
class Cidr(object):
    def __init__(self, cidr, models=None, controllers=None):
        LOG.info('%s(): cidr="%s"' % (KenLog.fcn(), cidr))

        self._cidr = cidr
        self._models = models
        self._controllers = controllers

        self._ip = IPNetwork(cidr)
        self._start_address = self.get_first_address()
        self._ip_size = self._ip.size
        self._ip_index_start = 1
        self._ip_index_end = 1
        self._update_from_cache()
        self._gateway = None

    def get_first_address(self):
        # return the first usable host address (or None for an empty network)
        return next(self._ip.iter_hosts(), None)

    def get_next_address(self):
        LOG.info('%s()' % KenLog.fcn())

        value = self._ip[self._ip_index_end]
        self._ip_index_end += 1
        self._set_cache()
        LOG.debug('get_next_address(): value=%s, ip_index_end=%s' %
                  (value, self._ip_index_end))
        return value.format()

    def _set_cache(self):
        if not self._models or not self._controllers:
            return
        cache = StatePersistor(self._models, self._controllers,
                               persistence_file='cidr.yml')
        elem = dict()
        elem['ip_index_start'] = self._ip_index_start
        elem['ip_index_end'] = self._ip_index_end
        cache_info = dict()
        cache_info[self._cidr] = elem
        cache.persist_info(cache_info)

    def _get_cache(self):
        if not self._models or not self._controllers:
            return
        cache = StatePersistor(self._models, self._controllers,
                               persistence_file='cidr.yml')
        cached_info = cache.recall_info([self._cidr])
        return cached_info

    def _update_from_cache(self):
        cache = self._get_cache()
        if cache:
            if self._ip_index_start < cache['ip_index_start']:
                self._ip_index_start = cache['ip_index_start']
            if self._ip_index_end < cache['ip_index_end']:
                self._ip_index_end = cache['ip_index_end']

    @property
    def netmask(self):
        return self._ip.netmask

    @property
    def gateway(self):
        return self._gateway

    @gateway.setter
    def gateway(self, value):
        self._gateway = value

    def to_json(self):
        value = '{ '
        value += '"cidr": "%s", ' % self._cidr
        value += '"start-address": "%s", ' % str(self._start_address)
        value += '"ip-index-start": %d, ' % self._ip_index_start
        value += '"ip-index-end": %d' % self._ip_index_end
        if self._gateway:
            value += ', "gateway": "%s"' % self._gateway
        value += ' }'
        return value

    @property
    def cidr(self):
        return self._cidr

    @property
    def start_address(self):
        return self._start_address

    @start_address.setter
    def start_address(self, value):
        self._ip_index_start = 1
        self._ip_index_end = 1
        self._start_address = value
        test_address = IPNetwork(value).ip
        for ip in self._ip.iter_hosts():
            if int(ip) == int(test_address):
                break
            self._ip_index_start += 1
            self._ip_index_end += 1
        self._update_from_cache()

    @property
    def ip_size(self):
        return self._ip_size

    @property
    def ip_index_start(self):
        return self._ip_index_start

    @ip_index_start.setter
    def ip_index_start(self, value):
        self._ip_index_start = value

    @property
    def ip_index_end(self):
        return self._ip_index_end

    @ip_index_end.setter
    def ip_index_end(self, value):
        self._ip_index_end = value
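# The indexing idiom behind Cidr.get_next_address, shown in isolation:
# IPNetwork supports integer indexing, so successive indices hand out
# successive addresses (index 0 is the network address itself).
from netaddr import IPNetwork

net = IPNetwork('192.168.10.0/24')
assert str(net[1]) == '192.168.10.1'  # first usable host
assert str(net[2]) == '192.168.10.2'  # what the next call would return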
class Controller(object): """Implementation of the clouds controller. This class implements functions to easily work with the available cloud objects. So far, it provides the following functionalities/abstractions: - crediting system (also terminate service when user out of credits) - adding nodes (VMs) - removing nodes (VMs) """ def __init__(self, config_parser, **kwargs): # TODO: retrieve this file name from the director.cfg configuration file & create the log file if it is not yet present init('/var/log/cpsdirector/debugging.log') self.__logger = create_logger(__name__) self.__logger.setLevel(logging.DEBUG) # Params for director callback self.__conpaas_name = config_parser.get('manager', 'DEPLOYMENT_NAME') self.__conpaas_creditUrl = config_parser.get('manager', 'CREDIT_URL') self.__conpaas_terminateUrl = config_parser.get('manager', 'TERMINATE_URL') self.__conpaas_service_id = config_parser.get('manager', 'SERVICE_ID') self.__conpaas_user_id = config_parser.get('manager', 'USER_ID') self.__conpaas_app_id = config_parser.get('manager', 'APP_ID') self.__conpaas_caUrl = config_parser.get('manager', 'CA_URL') # Set the CA URL as IPOP's base namespace self.__ipop_base_namespace = self.__conpaas_caUrl if config_parser.has_option('manager', 'IPOP_BASE_IP'): # Application-level network self.__ipop_base_ip = config_parser.get('manager', 'IPOP_BASE_IP') else: self.__ipop_base_ip = None if config_parser.has_option('manager', 'IPOP_NETMASK'): # Application-level netmask self.__ipop_netmask = config_parser.get('manager', 'IPOP_NETMASK') else: self.__ipop_netmask = None if config_parser.has_option('manager', 'IPOP_BOOTSTRAP_NODES'): # Application-level network's bootstrap nodes self.__ipop_bootstrap_nodes = config_parser.get('manager', 'IPOP_BOOTSTRAP_NODES') else: self.__ipop_bootstrap_nodes = None if config_parser.has_option('manager', 'IPOP_SUBNET'): # Only import from netaddr if IPOP has to be started from netaddr import IPNetwork # Subnet assigned to this service by the director self.__ipop_subnet = IPNetwork( config_parser.get('manager', 'IPOP_SUBNET')) else: self.__ipop_subnet = None # For crediting system self.__reservation_logger = create_logger('ReservationTimer') self.__reservation_map = {'manager': ReservationTimer(['manager'], 55 * 60, # 55mins self.__deduct_and_check_credit, self.__reservation_logger)} self.__reservation_map['manager'].start() self.__force_terminate_lock = Lock() self.config_parser = config_parser self.__created_nodes = [] self.__partially_created_nodes = [] self.__available_clouds = [] self.__default_cloud = None if config_parser.has_option('iaas', 'DRIVER'): self.__default_cloud = iaas.get_cloud_instance( 'iaas', config_parser.get('iaas', 'DRIVER').lower(), config_parser) self.__available_clouds.append(self.__default_cloud) if config_parser.has_option('iaas', 'OTHER_CLOUDS'): self.__logger.debug("attempt iaas.get_clouds()") try: self.__available_clouds.extend(iaas.get_clouds(config_parser)) except Exception as e: self.__logger.debug("failed iaas.get_clouds()") self.__reservation_map['manager'].stop() raise e self.__logger.debug("succeeded iaas.get_clouds()") # Setting VM role self.role = 'agent' def get_available_ipop_address(self): """Return an unassigned IP address in this manager's VPN subnet""" # Network iterator network = self.__ipop_subnet.iter_hosts() # Currently running hosts running_hosts = [ str(node.ip) for node in self.__created_nodes + self.__partially_created_nodes ] self.__logger.debug("get_available_ipop_address: running nodes: %s" % running_hosts) # 
The first address is used by IPOP internally network.next() # The second one is taken by manager network.next() for host in network: host = str(host) if host not in running_hosts: self.__logger.debug("get_available_ipop_address: returning %s" % host) return host #=========================================================================# # create_nodes(self, count, contextFile, test_agent) # #=========================================================================# def create_nodes(self, count, test_agent, port, cloud=None, inst_type=None): """ Creates the VMs associated with the list of nodes. It also tests if the agents started correctly. @param count The number of nodes to be created @param test_agent A callback function to test if the agent started correctly in the newly created VM @param port The port on which the agent will listen @param cloud (Optional) If specified, this function will start new nodes inside cloud, otherwise it will start new nodes inside the default cloud or wherever the controller wants (for now only the default cloud is used) @return A list of nodes of type node.ServiceNode """ self.__logger.debug('[create_nodes]') ready = [] poll = [] iteration = 0 if count == 0: return [] if cloud is None: cloud = self.__default_cloud if not self.deduct_credit(count): raise Exception('Could not add nodes. Not enough credits.') while len(ready) < count: iteration += 1 msg = '[create_nodes] iter %d: creating %d nodes on cloud %s' % ( iteration, count - len(ready), cloud.cloud_name) if inst_type: msg += ' of type %s' % inst_type self.__logger.debug(msg) try: self.__force_terminate_lock.acquire() if iteration == 1: request_start = time.time() service_type = self.config_parser.get('manager', 'TYPE') if service_type == 'galera': service_type = 'mysql' if self.role == 'manager': role_abbr = 'mgr' else: role_abbr = 'agt' # eg: conpaas-online-u3-s1-xtreemfs-mgr name = "%s-u%s-s%s-%s-%s" % (self.__conpaas_name, self.__conpaas_user_id, self.__conpaas_service_id, service_type, role_abbr) if (service_type == 'htc'): # If HTC is used we need to update here as well (as I see no way to do this elsewhere) self.add_context_replacement({ # 'CLOUD_VMID': cloud.cloud_vmid, 'CLOUD_NAME': cloud.cloud_name, 'CLOUD_MACHINE_TYPE': self.config_parser.get(cloud.cloud_name, 'INST_TYPE') , 'CLOUD_COST_PER_TIME': self.config_parser.get(cloud.cloud_name, 'COST_PER_TIME'), 'CLOUD_MAX_VMS_ALL_CLOUDS': self.config_parser.get('iaas', 'MAX_VMS_ALL_CLOUDS'), 'CLOUD_MAX_VMS': self.config_parser.get(cloud.cloud_name, 'MAX_VMS') }, cloud) if self.__ipop_base_ip and self.__ipop_netmask: # If IPOP has to be used we need to update VMs # contextualization data for each new instance for _ in range(count - len(ready)): vpn_ip = self.get_available_ipop_address() self.add_context_replacement({ 'IPOP_IP_ADDRESS': vpn_ip }, cloud) for newinst in cloud.new_instances(1, name, inst_type): # Set VPN IP newinst.ip = vpn_ip if newinst.private_ip == '': # If private_ip is not set yet, use vpn_ip newinst.private_ip = vpn_ip self.__partially_created_nodes.append(newinst) else: self.__logger.debug("[create_nodes]: cloud.new_instances(%d, %s, %s)" % ( count - len(ready), name, inst_type ) ) self.__partially_created_nodes = cloud.new_instances( count - len(ready), name, inst_type) self.__logger.debug("[create_nodes]: cloud.new_instances returned %s" % self.__partially_created_nodes) except Exception as e: self.__logger.exception( '[create_nodes]: Failed to request new nodes') self.delete_nodes(ready) self.__partially_created_nodes = [] 
raise e finally: self.__force_terminate_lock.release() poll, failed = self.__wait_for_nodes( self.__partially_created_nodes, test_agent, port) ready += poll poll = [] if failed: self.__logger.debug('[create_nodes]: %d nodes ' 'failed to startup properly: %s' % (len(failed), str(failed))) self.__partially_created_nodes = [] self.delete_nodes(failed) self.__force_terminate_lock.acquire() self.__created_nodes += ready self.__partially_created_nodes = [] self.__force_terminate_lock.release() # start reservation timer with slack of 3 mins + time already wasted # this should be enough time to terminate instances before # hitting the following hour timer = ReservationTimer([i.id for i in ready], (55 * 60) - (time.time() - request_start), self.__deduct_and_check_credit, self.__reservation_logger) timer.start() # set mappings for i in ready: self.__reservation_map[i.id] = timer return ready #=========================================================================# # delete_nodes(self, nodes) # #=========================================================================# def delete_nodes(self, nodes): """Kills the VMs associated with the list of nodes. @param nodes The list of nodes to be removed; - a node must be of type ServiceNode or a class that extends ServiceNode """ for node in nodes: cloud = self.get_cloud_by_name(node.cloud_name) self.__logger.debug('[delete_nodes]: killing ' + str(node.id)) try: # node may not be in map if it failed to start if node.id in self.__reservation_map: timer = self.__reservation_map.pop(node.id) if timer.remove_node(node.id) < 1: timer.stop() cloud.kill_instance(node) except: self.__logger.exception('[delete_nodes]: ' 'Failed to kill node %s', node.id) #=========================================================================# # list_vms(self, cloud=None) # #=========================================================================# def list_vms(self, cloud=None): """Returns an array with the VMs running at the given/default(s) cloud. @param cloud (Optional) If specified, this method will return the VMs already running at the given cloud """ if cloud is None: cloud = self.__default_cloud return cloud.list_vms() #=========================================================================# # generate_context(self, service_name, replace, cloud) # #=========================================================================# def generate_context(self, service_name, cloud=None, ip_whitelist=None): """Generates the contextualization file for the default/given cloud. @param cloud (Optional) If specified, the context will be generated for it, otherwise it will be generated for all the available clouds @param service_name Used to know which config_files and scripts to select """ def __set_cloud_ctx(cloud): contxt = self._get_context_file(service_name, cloud.get_cloud_type()) cloud.set_context(contxt) if cloud is None: for cloud in self.__available_clouds: __set_cloud_ctx(cloud) else: __set_cloud_ctx(cloud) def add_context_replacement(self, replace={}, cloud=None, strict=False): """Add a variable replacement to the variable replacements to apply to the context template for the default/given cloud. @param replace A dictionary that specifies which words should be replaced with what. 
For example: replace = dict(name='A', age='57') context1 = '$name , $age' => new_context1 = 'A , 57' context2 ='${name}na, ${age}' => new_context2 = 'Ana, 57' @param cloud (Optional) If specified, the context will be generated for it, otherwise it will be generated for the default cloud @param strict If true, then setting a replacement for an already replaced variable will raise an exception. """ if cloud is None: cloud = self.__default_cloud cloud.add_context_replacement(replace, strict) #=========================================================================# # get_clouds(self) # #=========================================================================# def get_clouds(self): """ @return The list of cloud objects """ return self.__available_clouds #=========================================================================# # get_cloud_by_name(self) # #=========================================================================# def get_cloud_by_name(self, cloud_name): """ @param cloud_name @return The cloud object which name is the same as @param name """ try: return [ cloud for cloud in self.__available_clouds if cloud.get_cloud_name() == cloud_name ][0] except IndexError: raise Exception("Unknown cloud: %s. Available clouds: %s" % ( cloud_name, self.__available_clouds)) #=========================================================================# # get_default_cloud(self) # #=========================================================================# def get_default_cloud(self): return self.__default_cloud #=========================================================================# # config_cloud(self, cloud, config_params) # #=========================================================================# def config_cloud(self, cloud, config_params): """Configures some parameters in the given cloud @param cloud The cloud to be configured @param config_params A dictionary containing the configuration parameters (are specific to the cloud) """ cloud.config(config_params) #=========================================================================# # config_clouds(self, config_params) # #=========================================================================# def config_clouds(self, config_params): """Same as config_cloud but for all available clouds @param config_params A dictionary containing the configuration parameters (are specific to the cloud) """ for cloud in self.__available_clouds: cloud.config(config_params) def __check_node(self, node, test_agent, port): """Return True if the given node has properly started an agent on the given port""" if node.ip == '' or node.private_ip == '': self.__logger.debug('[__check_node]: node.ip = %s, node.private_ip = %s: return False' % (node.ip, node.private_ip)) return False try: self.__logger.debug('[__check_node]: test_agent(%s, %s)' % ( node.ip, port)) test_agent(node.ip, port) self.__logger.debug('[__check_node]: node = %s' % node.__repr__()) return True except socket.error, err: self.__logger.debug('[__check_node]: %s' % err) return False
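# The allocation idiom behind get_available_ipop_address, as a standalone
# sketch: walk the subnet's usable hosts, skip the two reserved ones, and
# return the first address not already in use. Names and the sample subnet
# are illustrative only.
from netaddr import IPNetwork

def first_free_host(subnet, taken):
    hosts = subnet.iter_hosts()
    next(hosts)  # first host: reserved by IPOP internally
    next(hosts)  # second host: taken by the manager
    for host in hosts:
        if str(host) not in taken:
            return str(host)

assert first_free_host(IPNetwork('10.10.0.0/24'), {'10.10.0.3'}) == '10.10.0.4'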
def handleEvent(self, event): eventName = event.eventType srcModuleName = event.module eventData = event.data self.debug(f"Received event, {eventName}, from {srcModuleName}") if self.errorState: return if srcModuleName == "sfp_tool_onesixtyone": self.debug("Skipping event from myself.") return if not self.opts['onesixtyone_path']: self.error("You enabled sfp_tool_onesixtyone but did not set a path to the tool!") self.errorState = True return exe = self.opts['onesixtyone_path'] if self.opts['onesixtyone_path'].endswith('/'): exe = f"{exe}onesixtyone" if not os.path.isfile(exe): self.error(f"File does not exist: {exe}") self.errorState = True return if not SpiderFootHelpers.sanitiseInput(eventData, extra=['/']): self.debug("Invalid input, skipping.") return targets = [] try: if eventName == "NETBLOCK_OWNER" and self.opts['netblockscan']: net = IPNetwork(eventData) if net.prefixlen < self.opts['netblockscanmax']: self.debug(f"Skipping scanning of {eventData}, too big.") return for addr in net.iter_hosts(): targets.append(str(addr)) except BaseException as e: self.error(f"Strange netblock identified, unable to parse: {eventData} ({e})") return # Don't look up stuff twice, check IP == IP here if eventData in self.results: self.debug(f"Skipping {eventData} as already scanned.") return else: # Might be a subnet within a subnet or IP within a subnet for addr in self.results: if IPNetwork(eventData) in IPNetwork(addr): self.debug(f"Skipping {eventData} as already within a scanned range.") return self.results[eventData] = True # If we weren't passed a netblock, this will be empty if not targets: targets.append(eventData) for target in targets: args = [ exe, "-c", self.communitiesFile, target ] try: p = Popen(args, stdout=PIPE, stderr=PIPE) out, stderr = p.communicate(input=None) stdout = out.decode(sys.stdin.encoding) except Exception as e: self.error(f"Unable to run onesixtyone: {e}") continue if p.returncode != 0: self.error(f"Unable to read onesixtyone output\nstderr: {stderr}\nstdout: {stdout}") continue if not stdout: self.debug(f"onesixtyone returned no output for {target}") continue for result in stdout.split("\n"): srcevent = event if target not in result: continue if target != eventData: srcevent = SpiderFootEvent("IP_ADDRESS", target, self.__name__, event) self.notifyListeners(srcevent) e = SpiderFootEvent('UDP_PORT_OPEN', f"{target}:161", self.__name__, srcevent) self.notifyListeners(e) e = SpiderFootEvent("UDP_PORT_OPEN_INFO", result, self.__name__, e) self.notifyListeners(e)
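# The netblock-size guard above, in isolation. It can read as inverted at
# first glance: a *smaller* prefix length means a *larger* network, so
# prefixlen < netblockscanmax is precisely the "too big to scan" case.
from netaddr import IPNetwork

netblockscanmax = 24  # sample option value: scan nothing larger than a /24
assert IPNetwork('10.0.0.0/16').prefixlen < netblockscanmax       # skipped
assert not IPNetwork('10.0.0.0/28').prefixlen < netblockscanmax   # scanned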
class Network:
    _json_file = File('ipnetworks')
    _temp = collections.defaultdict(dict)
    _global_existing_ipset = collections.defaultdict(IPSet)

    def __init__(self, cidr, _test=False, **kwargs):
        if not _test:
            self._json_file = File('_ipnetwork')
        self._ipnetwork = IPNetwork(cidr)
        self._cidr = self._ipnetwork.cidr.__str__()
        self._prefixlen = self._ipnetwork.prefixlen
        if cidr != self._cidr:
            raise AnsibleError(
                "Invalid IP network: %s is an IP address belonging to %s" %
                (cidr, self._cidr))
        elif self._prefixlen > 30:
            raise AnsibleError(
                "Network prefix length > 30 is not supported: %s" % self._cidr)
        self._ipset = IPSet(self._ipnetwork)
        self._existing_ipset = self._global_existing_ipset[self._cidr]
        self._overlaps = None
        try:
            self._attrs = self._json_file[self._cidr]
        except KeyError:
            self._attrs = {
                'description': None,
                'parent': None,
                'is_parent': False,
                'state': None,
                **kwargs
            }
            self._json_file[self._cidr] = self._attrs

        for _cidr, _attrs in self._json_file.items():
            _ipnetwork = IPNetwork(_cidr)
            if self._cidr != _cidr:
                if self._ipnetwork.__contains__(_ipnetwork):
                    if _attrs['state'] == 'allocated':
                        self._overlaps = _cidr
                        break
                    self._existing_ipset.add(_ipnetwork)
                    # warning('A*** ', self._cidr, ',', _cidr, _attrs, '\n')
                elif _ipnetwork.__contains__(self._ipnetwork):
                    self._attrs['parent'] = _cidr
                    self._global_existing_ipset[_cidr].add(self._ipnetwork)
                    # warning('B*** ', _cidr, ',', self._cidr, self._attrs, '\n')

    @property
    def _size(self):
        # total number of addresses in the network
        return self._ipnetwork.size

    @property
    def _usable_ip(self):
        # everything except the network and broadcast addresses
        return self._size - 2

    @property
    def _iprange(self):
        return '{}-{}'.format(self._ipnetwork.network,
                              self._ipnetwork.broadcast)

    @property
    def _netmask(self):
        return str(self._ipnetwork.netmask)

    @property
    def _wildmask(self):
        return str(self._ipnetwork.hostmask)

    def _check_overlaps(self):
        if self._overlaps:
            raise AnsibleError(
                'Network %s overlaps with existing network %s' %
                (self._cidr, self._overlaps))

    def iter_subnets(self, prefixlen, random=True):
        if self._attrs['state'] == 'allocated':
            raise AnsibleError('Network %s already has host addresses assigned'
                               % self._cidr)
        self._check_overlaps()
        cidrs = (self._ipset - self._existing_ipset).iter_cidrs()
        subnets = [s for c in cidrs for s in c.subnet(prefixlen)]
        if random:
            shuffle(subnets)
        for subnet in subnets:
            self._existing_ipset.add(subnet)
            yield str(subnet)

    def iter_ipaddrs(self, random=True, reverse=False):
        if len(self._existing_ipset) > 0:
            raise AnsibleError("Network %s is already subnetted" % self._cidr)
        self._check_overlaps()
        try:
            self._existing_ipset.update(
                IPSet([i for i in self._json_file[self._cidr]['ipaddresses']]))
        except KeyError:
            pass
        ipaddrs = [
            i for i in IPSet(self._ipnetwork.iter_hosts()) - self._existing_ipset
        ]
        if random:
            shuffle(ipaddrs)
        elif reverse:
            # reversed() as a bare statement would be a no-op;
            # reverse the list in place instead
            ipaddrs.reverse()
        self._attrs.update({'state': 'allocated', 'ipaddresses': []})
        self._temp[self._cidr].update(self._attrs)
        for ipaddr in ipaddrs:
            self._temp[self._cidr]['ipaddresses'].append(str(ipaddr))
            self._existing_ipset.add(ipaddr)
            yield self._get_addrs(ipaddr)

    def _get_addrs(self, ipaddr):
        ip = str(ipaddr)
        prefix = '%s/%s' % (ip, self._prefixlen)
        host = '%s/32' % ip
        return ip, prefix, host

    def last_ip(self, incl_prefix=False):
        return self._get_addrs(IPAddress(self._ipnetwork.last - 1))

    def first_ip(self, incl_prefix=False):
        return self._get_addrs(IPAddress(self._ipnetwork.first + 1))

    def remove_subnet(self, subnet):
        for k, v in self._json_file.copy().items():
            if k == subnet and v['parent'] == self._cidr:
                del self._json_file[k]
        # del self._json_file[self._cidr]

    def _host_addr(self, addr):
        return re.sub(r'/\d+', '/32', addr)

    def remove_ipaddr(self, ipaddr):
        try:
            self._attrs['ipaddresses'].remove(ipaddr)
        except (KeyError, ValueError):
            pass

    def save(self):
        self._json_file.update(self._temp)
        self._json_file.save()
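# Note on the reverse branch fixed in iter_ipaddrs above: reversed() returns
# an iterator and leaves the original list untouched, so as a bare statement
# it does nothing; the in-place list.reverse() is what the branch needs.
ipaddrs = [1, 2, 3]
reversed(ipaddrs)          # result is discarded: effectively a no-op
assert ipaddrs == [1, 2, 3]
ipaddrs.reverse()          # mutates the list in place
assert ipaddrs == [3, 2, 1]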