Example #1
File: plugin.py Project: tonicbupt/nerub
def create_endpoint():
    data = request.get_json(force=True)
    app.logger.debug('CreateEndpoint JSON=%s', data)

    endpoint_id = data['EndpointID']
    network_id = data['NetworkID']
    interface = data['Interface']

    app.logger.info('Creating endpoint %s', endpoint_id)

    # docker sent me 172.19.0.3/16 ...
    address_ip4 = interface.get('Address', None)
    if address_ip4 and '/' in address_ip4:
        address_ip4 = IPAddress(address_ip4.split('/', 1)[0])

    network = Network.get(network_id)

    if not network:
        error_message = "CreateEndpoint called but network doesn\'t exist" \
                        " Endpoint ID: %s Network ID: %s" % \
                        (endpoint_id, network_id)
        app.logger.error(error_message)
        raise Exception(error_message)

    network.acquire_ip(endpoint_id, hostname, ip=address_ip4)
    app.logger.debug('CreateEndpoint response JSON=%s', {})
    return jsonify({})
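
The manual split of Docker's '172.19.0.3/16' value can also be expressed with netaddr's IPNetwork, which accepts CIDR strings and keeps the host part in its .ip attribute. A minimal standalone sketch (not taken from the plugin above):

from netaddr import IPAddress, IPNetwork

def extract_address(cidr):
    # '172.19.0.3/16' -> IPAddress('172.19.0.3'); IPNetwork keeps the host bits in .ip
    return IPNetwork(cidr).ip

print(extract_address('172.19.0.3/16'))                              # 172.19.0.3
print(extract_address('172.19.0.3/16') == IPAddress('172.19.0.3'))   # True
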
Example #2
File: IPv4.py Project: chubbymaggie/netzob
    def encode(data, unitSize=AbstractType.defaultUnitSize(), endianness=AbstractType.defaultEndianness(), sign=AbstractType.defaultSign()):
        """Encodes the specified data into an IPAddress object

        :param data: the data to encode into an IPAddress
        :type data: str or raw bytes (BBBB)
        :return: the encoded IPAddress
        """
        if isinstance(data, (str, int)):
            try:
                ip = IPAddress(data)
                if ip is not None and ip.version == 4 and not ip.is_netmask():
                    return ip
            except:
                pass
        try:

            structFormat = ">"
            if endianness == AbstractType.ENDIAN_BIG:
                structFormat = ">"

            if not sign == AbstractType.SIGN_SIGNED:
                structFormat += "bbbb"
            else:
                structFormat += "BBBB"
            quads = map(str, struct.unpack(structFormat, data))
            strIP = string.join(quads, '.')

            ip = IPAddress(strIP)
            if ip is not None and ip.version == 4 and not ip.is_netmask():
                return ip
        except Exception, e:
            raise TypeError("Impossible encode {0} into an IPv4 data ({1})".format(data, e))
Example #3
def auto_select_target(target, output=None):
    """Auto selection logic"""
    print "Target: %s" % target
    try:
        inp = IPAddress(target)
        if inp.is_private() or inp.is_loopback():
            print "Internal IP Detected : Skipping"
            sys.exit()
        else:
            print "Looks like an IP, running ipOsint...\n"
            ipOsint.run(target, output)
    except SystemExit:
        print "exiting"
    except AddrFormatError:
        if re.match('[^@]+@[^@]+\.[^@]+', target):
            print "Looks like an EMAIL, running emailOsint...\n"
            emailOsint.run(target, output)
        elif get_tld(target, fix_protocol=True,fail_silently=True) is not None:
            print "Looks like a DOMAIN, running domainOsint...\n"
            domainOsint.run(target, output)
        else:
            print "Nothing Matched assuming username, running usernameOsint...\n"
            usernameOsint.run(target, output)
    except:
        print "Unknown Error Occured"
Example #4
 def address(self, value):
     ip = IPAddress(self.ipformat(value))
     if ip.is_loopback():
         raise ValidationError("You cannot use a loopback address")
     if ip.is_multicast():
         raise ValidationError("You cannot use a multicast address")
     self._address = value
Example #5
    def validate_ip(self, request, remote_ip):
        # When we aren't configured to restrict on IP address
        if not getattr(settings, 'RESTRICTEDSESSIONS_RESTRICT_IP', True):
            return True
        # When the IP address key hasn't yet been set on the request session
        if SESSION_IP_KEY not in request.session:
            return True
        # When there is no remote IP, check if one has been set on the session
        session_ip = request.session[SESSION_IP_KEY]
        if not remote_ip:
            if session_ip:  # session has remote IP value so validate :-(
                return False
            else:  # Session doesn't have remote IP value so possibly :-)
                return True

        # Compute fuzzy IP compare based on settings on compare sensitivity
        session_network = IPNetwork(session_ip)
        remote_ip = IPAddress(remote_ip)
        try:
            session_network = session_network.ipv4()
            remote_ip = remote_ip.ipv4()
            session_network.prefixlen = getattr(settings, 'RESTRICTEDSESSIONS_IPV4_LENGTH', 32)
        except AddrConversionError:
            try:
                session_network.prefixlen = getattr(settings, 'RESTRICTEDSESSIONS_IPV6_LENGTH', 64)
            except AddrFormatError:
                # session_network must be IPv4, but remote_ip is IPv6
                return False
        return remote_ip in session_network
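
The "fuzzy IP compare" in validate_ip() boils down to truncating the stored address to a configured prefix length and testing membership. A minimal sketch, assuming the classic netaddr API used throughout these examples (where IPNetwork.prefixlen is writable):

from netaddr import IPAddress, IPNetwork

def same_subnet(stored_ip, remote_ip, prefixlen=24):
    # Compare only the first `prefixlen` bits of the two addresses.
    network = IPNetwork(stored_ip)
    network.prefixlen = prefixlen
    return IPAddress(remote_ip) in network

print(same_subnet('192.0.2.10', '192.0.2.200'))  # True  (same /24)
print(same_subnet('192.0.2.10', '192.0.3.200'))  # False
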
Example #6
def get_priv_info(d_iface_to_addr=None):
    s_net_id = None
    s_priv_ip = None
    s_priv_interface = None
    if d_iface_to_addr is None:
        d_iface_to_addr = get_iface_to_addr()
    networks = search('net', 'name', '*')
    for s_iface, d_addr in d_iface_to_addr.items():
        if s_iface.startswith('lo'):
            continue
        if netifaces.AF_INET not in d_addr:
            continue
        ips = d_addr[netifaces.AF_INET]
        for ip in ips:
            o_ip = IPAddress(str(ip['addr']))
            if not o_ip.is_private():
                continue
            if ip['addr'] == '127.0.0.1':
                continue
            for net in networks:
                if (('netmask' in net) and
                        (o_ip in IPNetwork(net['netmask']))):
                    s_priv_ip = str(ip['addr'])
                    s_priv_interface = s_iface
                    s_net_id = net['name']
                    break
    return (s_priv_ip, s_priv_interface, s_net_id)
Example #7
    def get_ip_address(self, test_address=None):
        """
        Try to get a global IP address from the interface information.
        If that fails, just return '127.0.0.1'.

        :param str test_address: IP address string used to test the global-IP
                                  check; normally None.
        :return: the global IP address if successful, otherwise '127.0.0.1'
        """
        for iface_name in netifaces.interfaces():
            iface_data = netifaces.ifaddresses(iface_name)
            logging.debug('Interface: %s' % (iface_name, ))
            ifaces = []
            if netifaces.AF_INET in iface_data:
                ifaces += iface_data[netifaces.AF_INET]
            if netifaces.AF_INET6 in iface_data:
                ifaces += iface_data[netifaces.AF_INET6]
            for iface in ifaces:
                ip = iface['addr']
                ip = re.sub(r'\%.+$', '', ip)
                if test_address is not None:
                    ip = test_address
                addr = IPAddress(ip)
                if not addr.is_loopback() and addr.is_unicast() and\
                   not addr.is_private():
                    logging.debug('global ip %s', addr)
                    return ip
        logging.debug('no global ip')
        return '127.0.0.1'
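
The re.sub() above strips an interface zone index (e.g. 'fe80::1%eth0'), which netifaces may return for IPv6 link-local addresses and which IPAddress() cannot parse. A small sketch of just that step, with a hypothetical zone-indexed value:

import re
from netaddr import IPAddress

raw = 'fe80::1%eth0'                       # hypothetical netifaces-style value
ip = IPAddress(re.sub(r'%.+$', '', raw))   # drop the zone index before parsing
print(ip, ip.is_link_local(), ip.is_unicast())  # fe80::1 True True
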
Example #8
def to_server_dict(server):
    public_ips = [ip["addr"] for ip in server.addresses["public"]]
    private_ips = [ip["addr"] for ip in server.addresses["private"]]

    # Pick out first public IPv4 and IPv6 address
    public_ipv4 = None
    public_ipv6 = None

    for ip in public_ips:
        try:
            ip_obj = IPAddress(ip)
        except Exception:
            continue

        if not ip_obj.is_private():
            if ip_obj.version == 4:
                public_ipv4 = ip
            elif ip_obj.version == 6:
                public_ipv6 = ip

    result = {
        "id": server.id,
        "name": server.name,
        "status": server.status,
        "image_id": server.image["id"],
        "flavor_id": server.flavor["id"],
        "public_ips": public_ips,
        "private_ips": private_ips,
        "public_ipv4": public_ipv4,
        "public_ipv6": public_ipv6,
        "key_name": server.key_name,
        "metadata": server.metadata,
    }
    return result
Example #9
def is_valid_netmask(ip_addr):
    """Valid the format of a netmask"""
    try:
        ip_address = IPAddress(ip_addr)
        return ip_address.is_netmask()

    except Exception:
        return False
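
is_netmask() returns True only for addresses made of contiguous leading one-bits. A short sketch of what the helper above would accept and reject:

from netaddr import IPAddress

for candidate in ('255.255.255.0', '255.255.0.255', '192.0.2.1'):
    print(candidate, IPAddress(candidate).is_netmask())
# 255.255.255.0 True
# 255.255.0.255 False
# 192.0.2.1 False
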
Example #10
File: website.py Project: UltrosBot/Ultros
    def call(self, url, context):
        if self.url_can_resolve(url):
            try:
                ip = yield self.resolver.get_host_by_name(url.domain)
                ip = IPAddress(ip)
            except Exception:
                # context["event"].target.respond(
                #     u'[Error] Failed to handle URL: {}'.format(
                #         url.to_string()
                #     )
                # )

                self.plugin.logger.exception("Error while checking DNS")
                returnValue(STOP_HANDLING)
                return

            if ip.is_loopback() or ip.is_private() or ip.is_link_local() \
                    or ip.is_multicast():
                self.plugin.logger.warn(
                    "Prevented connection to private/internal address"
                )

                returnValue(STOP_HANDLING)
                return

        headers = {}

        if url.domain in context["config"]["spoofing"]:
            user_agent = context["config"]["spoofing"][url.domain]

            if user_agent:
                headers["User-Agent"] = user_agent
        else:
            headers["User-Agent"] = context["config"].get(
                "default_user_agent",
                "Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 "
                "Firefox/36.0"
            )

        domain_langs = context.get("config") \
            .get("accept_language", {}) \
            .get("domains", {})

        if url.domain in domain_langs:
            headers["Accept-Language"] = domain_langs.get(url.domain)
        else:
            headers["Accept-Language"] = context.get("config") \
                .get("accept_language", {}) \
                .get("default", "en")

        session = self.get_session(url, context)
        session.get(unicode(url), headers=headers, stream=True,
                    background_callback=self.background_callback) \
            .addCallback(self.callback, url, context, session) \
            .addErrback(self.errback, url, context, session)

        returnValue(STOP_HANDLING)
Example #11
def test_ipaddress_v4():
    ip = IPAddress('192.0.2.1')
    assert ip.version == 4
    assert repr(ip) == "IPAddress('192.0.2.1')"
    assert str(ip) == '192.0.2.1'
    assert ip.format() == '192.0.2.1'
    assert int(ip) == 3221225985
    assert hex(ip) == '0xc0000201'
    assert ip.bin == '0b11000000000000000000001000000001'
    assert ip.bits() == '11000000.00000000.00000010.00000001'
    assert ip.words == (192, 0, 2, 1)
Example #12
def get_version(request):
    func = request.GET.get('func', '')
    remote_addr = IPAddress(request.remote_addr)
    data = {
        'address': remote_addr.format(),
        'version': remote_addr.version,
        'ipv4_mapped': remote_addr.is_ipv4_mapped(),
    }
    return Response(
            body='%s(%s);' % (func, json.dumps(data)),
            content_type='text/javascript')
Example #13
	def reload(self):
		self._nameservers = []
		ns = self._service.nameserversConfig()
		for n in ns:
			ip = IPAddress(n)
			if ip.version == 4:
				cfg = ConfigIP( default=toIP4List(ip.format()))
				self._nameservers.append(cfg)
			elif ip.version == 6:
				cfg = ConfigIP6(default=ip.format())
				self._nameservers.append(cfg)
Example #14
def test_ipaddress_v6():
    ip = IPAddress('fe80::dead:beef')
    assert ip.version == 6
    assert repr(ip) == "IPAddress('fe80::dead:beef')"
    assert str(ip) == 'fe80::dead:beef'
    assert ip.format() == 'fe80::dead:beef'
    assert int(ip) == 338288524927261089654018896845083623151
    assert hex(ip) == '0xfe8000000000000000000000deadbeef'
    assert ip.bin == '0b11111110100000000000000000000000000000000000000000000000000000000000000000000000000000000000000011011110101011011011111011101111'
    assert ip.bits() == '1111111010000000:0000000000000000:0000000000000000:0000000000000000:0000000000000000:0000000000000000:1101111010101101:1011111011101111'
    assert ip.words == (65152, 0, 0, 0, 0, 0, 57005, 48879)
Example #15
File: sslutils.py Project: EdDev/vdsm
    def _normalize_ip_address(addr):
        """
        When we used mapped ipv4 (starting with ::FFFF/96) we need to
        normalize it to ipv4 in order to compare it with value used
        in commonName in the certificate.
        """
        ip = IPAddress(addr)
        if ip.is_ipv4_mapped():
            addr = str(ip.ipv4())

        return addr
Example #16
def normalize_mapped_address(ipaddr):
    """
    Converts a IPv4-mapped IPv6 address into a IPv4 address. Handles both the
    ::ffff:192.0.2.128 format as well as the deprecated ::192.0.2.128 format.

    :param ipaddr: IP address [str]
    :return: normalized IP address [str]
    """
    ipaddr = IPAddress(ipaddr)
    if ipaddr.is_ipv4_compat() or ipaddr.is_ipv4_mapped():
        ipaddr = ipaddr.ipv4()
    return str(ipaddr)
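
A standalone sketch of the same normalization, covering the mapped, deprecated-compatible and plain-IPv6 cases mentioned in the docstring:

from netaddr import IPAddress

for addr in ('::ffff:192.0.2.128', '::192.0.2.128', '2001:db8::1'):
    ip = IPAddress(addr)
    if ip.is_ipv4_mapped() or ip.is_ipv4_compat():
        ip = ip.ipv4()
    print(addr, '->', ip)
# ::ffff:192.0.2.128 -> 192.0.2.128
# ::192.0.2.128 -> 192.0.2.128
# 2001:db8::1 -> 2001:db8::1
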
Example #17
 def ipv4_to_cidr(address, netmask=None, prefixlen=None):
     a = IPAddress(address)
     n = IPNetwork(a)
     if netmask:
         assert prefixlen is None, 'Cannot provide both netmask and prefixlen'
         m = IPAddress(netmask)
         assert m.is_netmask(), 'A valid netmask is required' 
         n.prefixlen = m.netmask_bits() 
     else:
         assert prefixlen, 'Provide either netmask or prefixlen'
         n.prefixlen = int(prefixlen)
     return str(n.cidr)
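
A usage sketch for the helper above, with made-up documentation addresses (it assumes the ipv4_to_cidr just defined):

print(ipv4_to_cidr('192.0.2.10', netmask='255.255.255.0'))  # 192.0.2.0/24
print(ipv4_to_cidr('192.0.2.10', prefixlen=16))             # 192.0.0.0/16
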
Example #18
    def validate_ip(self, request, remote_ip):
        if not getattr(settings, 'RESTRICTEDSESSIONS_RESTRICT_IP', True) or not SESSION_IP_KEY in request.session:
            return True

        session_network = IPNetwork(request.session[SESSION_IP_KEY])
        remote_ip = IPAddress(remote_ip)
        try:
            session_network = session_network.ipv4()
            remote_ip = remote_ip.ipv4()
            session_network.prefixlen = getattr(settings, 'RESTRICTEDSESSIONS_IPV4_LENGTH', 32)
        except AddrConversionError:
            session_network.prefixlen = getattr(settings, 'RESTRICTEDSESSIONS_IPV6_LENGTH', 64)
        return remote_ip in session_network
Example #19
 def test_01_add_ip_same_cidr(self):
     """Test add guest ip range in the existing cidr
     """
     # call increment_cidr function to get exiting cidr from the setup and
     # increment it
     ip2 = self.increment_cidr()
     test_nw = ip2.network
     ip = IPAddress(test_nw)
     # Add IP range(5 IPs) in the new CIDR
     test_gateway = ip.__add__(1)
     test_startIp = ip.__add__(3)
     test_endIp = ip.__add__(10)
     test_startIp2 = ip.__add__(11)
     test_endIp2 = ip.__add__(15)
     # Populating services with new IP range
     self.services["vlan_ip_range"]["startip"] = test_startIp
     self.services["vlan_ip_range"]["endip"] = test_endIp
     self.services["vlan_ip_range"]["gateway"] = test_gateway
     self.services["vlan_ip_range"]["netmask"] = self.netmask
     self.services["vlan_ip_range"]["zoneid"] = self.zone.id
     self.services["vlan_ip_range"]["podid"] = self.pod.id
     # create new vlan ip range
     self.debug("Creating new ip range with new cidr in the same vlan")
     new_vlan = PublicIpRange.create(
         self.apiclient,
         self.services["vlan_ip_range"])
     self.debug(
         "Created new vlan range with startip:%s and endip:%s" %
         (test_startIp, test_endIp))
     self.cleanup.append(new_vlan)
     new_vlan_res = new_vlan.list(self.apiclient, id=new_vlan.vlan.id)
     # Compare list output with configured values
     self.verify_vlan_range(new_vlan_res, self.services["vlan_ip_range"])
     # Add few more ips in the same CIDR
     self.services["vlan_ip_range"]["startip"] = test_startIp2
     self.services["vlan_ip_range"]["endip"] = test_endIp2
     self.debug("Creating new ip range in the existing CIDR")
     new_vlan2 = PublicIpRange.create(
         self.apiclient,
         self.services["vlan_ip_range"])
     self.debug(
         "Created new vlan range with startip:%s and endip:%s" %
         (test_startIp2, test_endIp2))
     self.cleanup.append(new_vlan2)
     # list new vlan ip range
     new_vlan2_res = new_vlan2.list(self.apiclient, id=new_vlan2.vlan.id)
     # Compare list output with configured values
     self.verify_vlan_range(new_vlan2_res, self.services["vlan_ip_range"])
     return
Example #20
def is_valid_gateway(ip_addr):
    """Valid the format of gateway"""

    invalid_ip_prefix = ['0', '224', '169', '127']
    try:
        # Check if ip_addr is an IP address and not start with 0
        ip_addr_prefix = ip_addr.split('.')[0]
        if is_valid_ip(ip_addr) and ip_addr_prefix not in invalid_ip_prefix:
            ip_address = IPAddress(ip_addr)
            if not ip_address.is_multicast():
                # Check if ip_addr is not multicast and reserved IP
                return True
        return False
    except Exception:
        return False
Example #21
    def test_03_del_ip_range(self):
        """Test delete ip range

           Steps:
           1.Add ip range in same/new cidr
           2.delete the ip range added at step1
           3.Verify the ip range deletion using list APIs
        """
        # call increment_cidr function to get exiting cidr from the setup and
        # increment it
        ip2 = self.increment_cidr()
        test_nw = ip2.network
        ip = IPAddress(test_nw)
        # Add IP range(5 IPs) in the new CIDR
        test_gateway = ip.__add__(1)
        test_startIp = ip.__add__(3)
        test_endIp = ip.__add__(10)
        # Populating services with new IP range
        self.services["vlan_ip_range"]["startip"] = test_startIp
        self.services["vlan_ip_range"]["endip"] = test_endIp
        self.services["vlan_ip_range"]["gateway"] = test_gateway
        self.services["vlan_ip_range"]["netmask"] = self.netmask
        self.services["vlan_ip_range"]["zoneid"] = self.zone.id
        self.services["vlan_ip_range"]["podid"] = self.pod.id
        # create new vlan ip range
        self.debug("Creating new ip range in the new cidr")
        new_vlan = PublicIpRange.create(
            self.apiclient,
            self.services["vlan_ip_range"])
        self.debug(
            "Created new vlan range with startip:%s and endip:%s" %
            (test_startIp, test_endIp))
        new_vlan_res = new_vlan.list(self.apiclient, id=new_vlan.vlan.id)
        # Compare list output with configured values
        self.verify_vlan_range(new_vlan_res, self.services["vlan_ip_range"])
        # Delete the above IP range
        self.debug("Deleting new ip range added in new cidr")
        new_vlan.delete(self.apiclient)
        # listing vlan ip ranges with the id should throw an exception; if not,
        # mark the test case as failed
        try:
            new_vlan.list(self.apiclient, id=new_vlan.vlan.id)
        except CloudstackAPIException as cs:
            self.debug(cs.errorMsg)
            self.assertTrue(
                cs.errorMsg.find("entity does not exist") > 0,
                msg="Failed to delete IP range")
        return
Example #22
File: utils.py Project: CyberHatcoil/ACF
def metadata_file_writer(q, filename):
    PLUGINS = load_metadata_plugins()
    metadata_file = open(filename, "w")
    ip_adresses = [0]
    while True:
        connection = q.get()
        ip = connection[6]
        ipAddress = IPAddress(hex_to_ip(ip))
        if ip not in ip_adresses and not ipAddress.is_private() and not ipAddress.is_loopback():
            ip_adresses.append(ip)
            for p in PLUGINS:
                p.set_connection(connection)
                res = p.run()
                if len(res):
                    metadata_file.write("%s, %s,%s\n" % (p.name, hex_to_ip(ip), res))
                    metadata_file.flush()
        q.task_done()
Example #23
    def validate_ip(self, request, remote_ip):
        if not getattr(settings, 'RESTRICTEDSESSIONS_RESTRICT_IP', True) or not SESSION_IP_KEY in request.session:
            return True

        session_network = IPNetwork(request.session[SESSION_IP_KEY])
        remote_ip = IPAddress(remote_ip)
        try:
            session_network = session_network.ipv4()
            remote_ip = remote_ip.ipv4()
            session_network.prefixlen = IPV4_LENGTH
        except AddrConversionError:
            try:
                session_network.prefixlen = IPV6_LENGTH
            except AddrFormatError:
                # session_network must be IPv4, but remote_ip is IPv6
                return False
        return remote_ip in session_network
Example #24
File: validators.py Project: UMIACS/qav
 def validate(self, value):
     """Return a boolean if the value is a valid netmask."""
     try:
         self._choice = IPAddress(value)
     except AddrFormatError:
         self.error_message = '%s is not a valid IP address.' % value
         return False
     if self._choice.is_netmask():
         return True
     else:
         self.error_message = '%s is not a valid IP netmask.' % value
         return False
Example #25
    def validate_ipv4_address(cls, _, value):
        """
        Ensures the :attr:`ip` address is valid.  This checks to ensure
        that the value provided is:

            * not a hostmask
            * not link local (:rfc:`3927`)
            * not used for multicast (:rfc:`1112`)
            * not a netmask (:rfc:`4632`)
            * not reserved (:rfc:`6052`)
            * a private address (:rfc:`1918`)
        """
        if value is None:
            return value

        try:
            address = IPAddress(value)

        except (AddrFormatError, ValueError) as e:
            raise ValueError(
                "%s is not a valid address format: %s" % (value, e))

        if ALLOW_AGENT_LOOPBACK:
            loopback = lambda: False
        else:
            loopback = address.is_loopback

        if any([address.is_hostmask(), address.is_link_local(),
                loopback(), address.is_multicast(),
                address.is_netmask(), address.is_reserved()]):
            raise ValueError("%s is not a valid address type" % value)

        return value
Example #26
    def test_02_add_ip_diff_cidr(self):
        """Test add ip range in a new cidr

           Steps:
           1.Get public vlan range (guest cidr) from the setup
           2.Add IP range to a new cidr
        """
        # call increment_cidr function to get exiting cidr from the setup and
        # increment it
        ip2 = self.increment_cidr()
        test_nw = ip2.network
        ip = IPAddress(test_nw)
        # Add IP range(5 IPs) in the new CIDR
        test_gateway = ip.__add__(1)
        test_startIp = ip.__add__(3)
        test_endIp = ip.__add__(10)
        # Populating services with new IP range
        self.services["vlan_ip_range"]["startip"] = test_startIp
        self.services["vlan_ip_range"]["endip"] = test_endIp
        self.services["vlan_ip_range"]["gateway"] = test_gateway
        self.services["vlan_ip_range"]["netmask"] = self.netmask
        self.services["vlan_ip_range"]["zoneid"] = self.zone.id
        self.services["vlan_ip_range"]["podid"] = self.pod.id
        # create new vlan ip range
        self.debug("Adding new ip range in different CIDR in same vlan")
        new_vlan = PublicIpRange.create(
            self.apiclient,
            self.services["vlan_ip_range"])
        self.debug(
            "Created new vlan range with startip:%s and endip:%s" %
            (test_startIp, test_endIp))
        self.cleanup.append(new_vlan)
        new_vlan_res = new_vlan.list(self.apiclient, id=new_vlan.vlan.id)
        # Compare list output with configured values
        self.verify_vlan_range(new_vlan_res, self.services["vlan_ip_range"])
        return
Example #27
    def same_ip(cls, orig_remote_ip, remote_ip):
        # Check is disabled -- always return true
        if not getattr(settings, 'RESTRICTEDSESSIONS_RESTRICT_IP', True):
            return True

        # No original IP or current IP is unknown
        if not orig_remote_ip or not remote_ip:
            return True

        session_network = IPNetwork(orig_remote_ip)
        remote_ip = IPAddress(remote_ip)
        try:
            session_network = session_network.ipv4()
            remote_ip = remote_ip.ipv4()
            session_network.prefixlen = getattr(settings, 'RESTRICTEDSESSIONS_IPV4_LENGTH', 32)
        except AddrConversionError:
            try:
                session_network.prefixlen = getattr(settings, 'RESTRICTEDSESSIONS_IPV6_LENGTH', 64)
            except AddrFormatError:
                # session_network must be IPv4, but remote_ip is IPv6
                return False

        # IP belongs to the same network
        return remote_ip in session_network
Example #28
    def test_04_add_noncontiguous_ip_range(self):
        """Test adding non-contiguous ip range in existing cidr

            1.Add ip range in new cidr
            2.Add non-contiguous ip range in cidr added at step1
            3.Verify the ip range using list APIs
        """
        # call increment_cidr function to get exiting cidr from the setup and
        # increment it
        ip2 = self.increment_cidr()
        test_nw = ip2.network
        ip = IPAddress(test_nw)
        # Add IP range(5 IPs) in the new CIDR
        test_gateway = ip.__add__(1)
        test_startIp = ip.__add__(50)
        test_endIp = ip.__add__(60)
        # Populating services with new IP range
        self.services["vlan_ip_range"]["startip"] = test_startIp
        self.services["vlan_ip_range"]["endip"] = test_endIp
        self.services["vlan_ip_range"]["gateway"] = test_gateway
        self.services["vlan_ip_range"]["netmask"] = self.netmask
        self.services["vlan_ip_range"]["zoneid"] = self.zone.id
        self.services["vlan_ip_range"]["podid"] = self.pod.id
        # create new vlan ip range
        new_vlan = PublicIpRange.create(
            self.apiclient,
            self.services["vlan_ip_range"])
        self.debug(
            "Created new vlan range with startip:%s and endip:%s" %
            (test_startIp, test_endIp))
        self.cleanup.append(new_vlan)
        new_vlan_res = new_vlan.list(self.apiclient, id=new_vlan.vlan.id)
        # Compare list output with configured values
        self.verify_vlan_range(new_vlan_res, self.services["vlan_ip_range"])
        # Add non-contiguous ip range in exiting cidr
        test_startIp2 = ip.__add__(10)
        test_endIp2 = ip.__add__(20)
        # Populating services with new IP range
        self.services["vlan_ip_range"]["startip"] = test_startIp2
        self.services["vlan_ip_range"]["endip"] = test_endIp2
        # create new vlan ip range
        self.debug("Adding non contiguous ip range")
        new_vlan = PublicIpRange.create(
            self.apiclient,
            self.services["vlan_ip_range"])
        self.debug(
            "Created new vlan range with startip:%s and endip:%s" %
            (test_startIp, test_endIp))
        self.cleanup.append(new_vlan)
        new_vlan_res = new_vlan.list(self.apiclient, id=new_vlan.vlan.id)
        # Compare list output with configured values
        self.verify_vlan_range(new_vlan_res, self.services["vlan_ip_range"])
        return
Example #29
def invalid_subnet(rec):
    """Catch logins from unauthorized subnets"""
    valid_cidr = IPNetwork('10.2.0.0/24')
    ip = IPAddress(rec['columns']['host'])

    return (rec['name'] == 'logged_in_users' and ip not in valid_cidr)
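
The containment check itself is plain netaddr membership. A minimal sketch with the same 10.2.0.0/24 subnet:

from netaddr import IPAddress, IPNetwork

valid_cidr = IPNetwork('10.2.0.0/24')
print(IPAddress('10.2.0.17') in valid_cidr)   # True
print(IPAddress('10.3.0.17') in valid_cidr)   # False
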
Example #30
 def parse_bgpstreamhist_csvs(self):
     with Connection(RABBITMQ_URI) as connection:
         self.update_exchange = create_exchange(
             "bgp-update", connection, declare=True
         )
         producer = Producer(connection)
         validator = mformat_validator()
         for csv_file in glob.glob("{}/*.csv".format(self.input_dir)):
             try:
                 with open(csv_file, "r") as f:
                     csv_reader = csv.reader(f, delimiter="|")
                     for row in csv_reader:
                         try:
                             if len(row) != 9:
                                 continue
                             if row[0].startswith("#"):
                                 continue
                             # example row: 139.91.0.0/16|8522|1403|1403 6461 2603 21320
                             # 5408
                             # 8522|routeviews|route-views2|A|"[{""asn"":1403,""value"":6461}]"|1517446677
                             this_prefix = row[0]
                             if row[6] == "A":
                                 as_path = row[3].split(" ")
                                 communities = json.loads(row[7])
                             else:
                                 as_path = []
                                 communities = []
                             service = "historical|{}|{}".format(row[4], row[5])
                             type_ = row[6]
                             timestamp = float(row[8])
                             peer_asn = int(row[2])
                             for prefix in self.prefixes:
                                 try:
                                     base_ip, mask_length = this_prefix.split("/")
                                     our_prefix = IPNetwork(prefix)
                                     if (
                                         IPAddress(base_ip) in our_prefix
                                         and int(mask_length) >= our_prefix.prefixlen
                                     ):
                                         msg = {
                                             "type": type_,
                                             "timestamp": timestamp,
                                             "path": as_path,
                                             "service": service,
                                             "communities": communities,
                                             "prefix": this_prefix,
                                             "peer_asn": peer_asn,
                                         }
                                         try:
                                             if validator.validate(msg):
                                                 msgs = normalize_msg_path(msg)
                                                 for msg in msgs:
                                                     key_generator(msg)
                                                     log.debug(msg)
                                                     producer.publish(
                                                         msg,
                                                         exchange=self.update_exchange,
                                                         routing_key="update",
                                                         serializer="ujson",
                                                     )
                                                     time.sleep(0.01)
                                             else:
                                                 log.warning(
                                                     "Invalid format message: {}".format(
                                                         msg
                                                     )
                                                 )
                                         except BaseException:
                                             log.exception(
                                                 "Error when normalizing BGP message: {}".format(
                                                     msg
                                                 )
                                             )
                                         break
                                 except Exception:
                                     log.exception("prefix")
                         except Exception:
                             log.exception("row")
             except Exception:
                 log.exception("exception")
Example #31
 def update(self, obj=None):
     changed = self.update_vnc_obj(obj)
     if 'floating_ip_address' in changed and self.floating_ip_address:
         self.ip_version = IPAddress(self.floating_ip_address).version
     return changed
Example #32
File: dns.py Project: kefkahacks/pupy
    def run(self, args):
        launch_dns_ip_resolver = self.client.remote('pupyutils.dns',
                                                    'launch_dns_ip_resolver')
        launch_reverse_ip_resolver = self.client.remote(
            'pupyutils.dns', 'launch_reverse_ip_resolver', False)

        add_space = False

        for target in args.targets:
            if add_space:
                self.log(NewLine())

            try:
                address = str(IPAddress(target))
                self.log('Resolve IP: {}'.format(target))
                hostname = launch_reverse_ip_resolver(address)
                if hostname:
                    self.success('{}: {}'.format(address, hostname))
                else:
                    self.error('{}: Not found'.format(address))
                add_space = True
                continue
            except (ValueError, AddrFormatError):
                pass

            try:
                network = IPNetwork(target)
                objects = []
                self.log('Resolve Net: {} (size={})'.format(
                    target, len(network)))
                for ip in network:
                    ip = str(ip)
                    rip = launch_reverse_ip_resolver(ip)
                    if rip:
                        objects.append({'IP': ip, 'HOSTNAME': rip})

                self.success(Table(objects, ['IP', 'HOSTNAME']))
                add_space = True
                continue

            except AddrFormatError:
                pass

            self.log('Resolve hostname: {}'.format(target))
            known = set()
            found = False

            for k, v in launch_dns_ip_resolver(target).iteritems():
                if v and not type(v) == str:
                    v = [x for x in v if not x in known]
                    for x in v:
                        known.add(x)
                elif v:
                    known.add(v)

                if not v:
                    continue

                self.success('{}: {}'.format(
                    k, v if type(v) is str else ','.join(v)))
                found = True

            if not found:
                self.error('{}: Not found'.format(target))

            add_space = True
Example #33
    def test_install_global_rules(self, m_ipset, m_devices, m_check_call):
        m_devices.interface_exists.return_value = False
        m_devices.interface_up.return_value = False
        m_set_ips = m_devices.set_interface_ips

        env_dict = {
            "FELIX_ETCDADDR": "localhost:4001",
            "FELIX_HOSTNAME": "myhost",
            "FELIX_INTERFACEPREFIX": "tap",
            "FELIX_METADATAADDR": "123.0.0.1",
            "FELIX_METADATAPORT": "1234",
            "FELIX_IPINIPENABLED": "True",
            "FELIX_IPINIPMTU": "1480",
            "FELIX_DEFAULTENDPOINTTOHOSTACTION": "RETURN"
        }
        config = load_config("felix_missing.cfg", env_dict=env_dict)
        config.IP_IN_IP_ADDR = IPAddress("10.0.0.1")

        m_v4_upd = Mock(spec=IptablesUpdater)
        m_v6_upd = Mock(spec=IptablesUpdater)
        m_v6_raw_upd = Mock(spec=IptablesUpdater)
        m_v4_nat_upd = Mock(spec=IptablesUpdater)
        m_v6_nat_upd = Mock(spec=IptablesUpdater)

        frules.install_global_rules(config, m_v4_upd, m_v6_upd, m_v4_nat_upd,
                                    m_v6_nat_upd, m_v6_raw_upd)

        self.assertEqual(m_v4_nat_upd.ensure_rule_inserted.mock_calls, [
            call(
                "POSTROUTING --out-interface tunl0 "
                "-m addrtype ! --src-type LOCAL --limit-iface-out "
                "-m addrtype --src-type LOCAL "
                "-j MASQUERADE",
                async=False),
            call("PREROUTING --jump felix-PREROUTING", async=False),
            call("POSTROUTING --jump felix-POSTROUTING", async=False),
        ])

        expected_chains = {
            'felix-FIP-DNAT': [],
            'felix-FIP-SNAT': [],
            'felix-PREROUTING': [
                '--append felix-PREROUTING --jump felix-FIP-DNAT',
                '--append felix-PREROUTING --protocol tcp --dport 80 --destination '
                '169.254.169.254/32 --jump DNAT --to-destination 123.0.0.1:1234'
            ],
            'felix-POSTROUTING':
            ['--append felix-POSTROUTING --jump felix-FIP-SNAT']
        }
        m_v4_nat_upd.rewrite_chains.assert_called_once_with(
            expected_chains, {
                'felix-PREROUTING': set(['felix-FIP-DNAT']),
                'felix-POSTROUTING': set(['felix-FIP-SNAT'])
            },
            async=False)

        self.assertEqual(m_v6_nat_upd.ensure_rule_inserted.mock_calls, [
            call("PREROUTING --jump felix-PREROUTING", async=False),
            call("POSTROUTING --jump felix-POSTROUTING", async=False),
        ])

        expected_chains = {
            'felix-FIP-DNAT': [],
            'felix-FIP-SNAT': [],
            'felix-PREROUTING':
            ['--append felix-PREROUTING --jump felix-FIP-DNAT'],
            'felix-POSTROUTING':
            ['--append felix-POSTROUTING --jump felix-FIP-SNAT']
        }
        m_v6_nat_upd.rewrite_chains.assert_called_once_with(
            expected_chains, {
                'felix-PREROUTING': set(['felix-FIP-DNAT']),
                'felix-POSTROUTING': set(['felix-FIP-SNAT'])
            },
            async=False)

        self.assertEqual(m_v4_nat_upd.ensure_rule_inserted.mock_calls, [
            call(
                "POSTROUTING --out-interface tunl0 "
                "-m addrtype ! --src-type LOCAL --limit-iface-out "
                "-m addrtype --src-type LOCAL "
                "-j MASQUERADE",
                async=False),
            call("PREROUTING --jump felix-PREROUTING", async=False),
            call("POSTROUTING --jump felix-POSTROUTING", async=False)
        ])

        m_v6_raw_upd.ensure_rule_inserted.assert_called_once_with(
            'PREROUTING --in-interface tap+ --match rpfilter --invert --jump '
            'felix-PREROUTING',
            async=False,
        )
        m_v6_raw_upd.rewrite_chains.assert_called_once_with(
            {
                'felix-PREROUTING': [
                    '--append felix-PREROUTING --jump DROP -m comment '
                    '--comment "IPv6 rpfilter failed"'
                ]
            }, {'felix-PREROUTING': {}},
            async=False)

        m_ipset.ensure_exists.assert_called_once_with()
        self.assertEqual(m_check_call.mock_calls, [
            call(["ip", "tunnel", "add", "tunl0", "mode", "ipip"]),
            call(["ip", "link", "set", "tunl0", "mtu", "1480"]),
            call(["ip", "link", "set", "tunl0", "up"]),
        ])
        self.assertEqual(m_set_ips.mock_calls,
                         [call(IPV4, "tunl0", set([IPAddress("10.0.0.1")]))])

        expected_chains = {
            'felix-INPUT': [
                '--append felix-INPUT ! --in-interface tap+ --jump RETURN',
                '--append felix-INPUT --match conntrack --ctstate INVALID --jump DROP',
                '--append felix-INPUT --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
                '--append felix-INPUT --protocol tcp --destination 123.0.0.1 --dport 1234 --jump ACCEPT',
                '--append felix-INPUT --protocol udp --sport 68 --dport 67 --jump ACCEPT',
                '--append felix-INPUT --protocol udp --dport 53 --jump ACCEPT',
                '--append felix-INPUT --jump felix-FROM-ENDPOINT'
            ],
            'felix-FORWARD': [
                '--append felix-FORWARD --in-interface tap+ --match conntrack --ctstate INVALID --jump DROP',
                '--append felix-FORWARD --out-interface tap+ --match conntrack --ctstate INVALID --jump DROP',
                '--append felix-FORWARD --in-interface tap+ --match conntrack --ctstate RELATED,ESTABLISHED --jump RETURN',
                '--append felix-FORWARD --out-interface tap+ --match conntrack --ctstate RELATED,ESTABLISHED --jump RETURN',
                '--append felix-FORWARD --jump felix-FROM-ENDPOINT --in-interface tap+',
                '--append felix-FORWARD --jump felix-TO-ENDPOINT --out-interface tap+',
                '--append felix-FORWARD --jump ACCEPT --in-interface tap+',
                '--append felix-FORWARD --jump ACCEPT --out-interface tap+'
            ]
        }
        m_v4_upd.rewrite_chains.assert_called_once_with(
            expected_chains, {
                'felix-INPUT': set(['felix-FROM-ENDPOINT']),
                'felix-FORWARD': set(
                    ['felix-FROM-ENDPOINT', 'felix-TO-ENDPOINT'])
            },
            async=False)

        self.assertEqual(m_v4_upd.ensure_rule_inserted.mock_calls, [
            call("INPUT --jump felix-INPUT", async=False),
            call("FORWARD --jump felix-FORWARD", async=False),
        ])
Example #34
	def tcpfuzz(self):
		buf = ''
		try:
			self.target = str(IPAddress(self.target))
		except AddrFormatError as e:
			try:
				self.target = socket.gethostbyname(self.target)
			except Exception as e:
				print "[-] Select a valid IP Address as target."
				print "[!] Exception caught: {}".format(e)
				return

		buf = '\x41'*self.offset
		print "[+] TCP fuzzing initialized, wait untill crash."
		while True:
			try:
				self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
				self.socket.settimeout(2)
				self.socket.connect((self.target,self.port))
				print "[+] Fuzzing with [{}] bytes.".format(len(buf))
				try:
					response = self.socket.recv(1024)
					print "[*] Response: {}".format(response)
					self.socket.send(buf)
					try:
						response = self.socket.recv(1024)
						print "[*] Response: {}".format(response)
						self.socket.close()
						buf += '\x41'*self.offset
					except:
						self.socket.close()
						buf += '\x41'*self.offset
				except:
					self.socket.send(buf)
					try:
						response = self.socket.recv(1024)
						print "[*] Response: {}".format(response)
						self.socket.close()
						buf += '\x41'*self.offset

					except:
						self.socket.close()
						buf += '\x41'*self.offset

			except KeyboardInterrupt:
				break
			except Exception as e:
				if 'Connection refused' in e:
					print "[-] Connection refused."
					time.sleep(4)

				else:
					try:
						response = self.socket.recv(1024)
						print "[*] Response: {}".format(response)
					except Exception as e:
						if 'timed out' in e:
							print "[-] Timed out."
							time.sleep(2)
					print "[+] Crash occured with buffer length: {}".format(str(len(buf)))
					print "[!] Exception caught: {}".format(e)
Example #35
    def resourceList(self, id, target, targetType):
        targetDom = ''
        # Get the base domain if we're supplied a domain
        if targetType == "domain":
            targetDom = self.sf.hostDomain(target, self.opts['_internettlds'])
            if not targetDom:
                return None

        for check in list(malchecks.keys()):
            cid = malchecks[check]['id']
            if id == cid:
                data = dict()
                url = malchecks[check]['url'].format(self.opts['confidenceminimum'])
                hdr = {
                    'Key': self.opts['api_key'],
                    'Accept': "text/plain"
                }
                data = self.sf.fetchUrl(url, timeout=self.opts['_fetchtimeout'],
                                        useragent=self.opts['_useragent'], headers=hdr)
                if data['content'] is None:
                    self.sf.error("Unable to fetch " + url, False)
                    return None

                url = "https://www.abuseipdb.com/check/" + target

                # If we're looking at netblocks
                if targetType == "netblock":
                    iplist = list()
                    # Get the regex, replace {0} with an IP address matcher to
                    # build a list of IP.
                    # Cycle through each IP and check if it's in the netblock.
                    if 'regex' in malchecks[check]:
                        rx = malchecks[check]['regex'].replace("{0}", r"(\d+\.\d+\.\d+\.\d+)")
                        pat = re.compile(rx, re.IGNORECASE)
                        self.sf.debug("New regex for " + check + ": " + rx)
                        for line in data['content'].split('\n'):
                            grp = re.findall(pat, line)
                            if len(grp) > 0:
                                # self.sf.debug("Adding " + grp[0] + " to list.")
                                iplist.append(grp[0])
                    else:
                        iplist = data['content'].split('\n')

                    for ip in iplist:
                        if len(ip) < 8 or ip.startswith("#"):
                            continue
                        ip = ip.strip()

                        try:
                            if IPAddress(ip) in IPNetwork(target):
                                self.sf.debug(f"{ip} found within netblock/subnet {target} in {check}")
                                return url
                        except Exception as e:
                            self.sf.debug(f"Error encountered parsing: {e}")
                            continue

                    return None

                # If we're looking at hostnames/domains/IPs
                if 'regex' not in malchecks[check]:
                    for line in data['content'].split('\n'):
                        if line == target or (targetType == "domain" and line == targetDom):
                            self.sf.debug(target + "/" + targetDom + " found in " + check + " list.")
                            return url
                else:
                    # Check for the domain and the hostname
                    try:
                        rxDom = str(malchecks[check]['regex']).format(targetDom)
                        rxTgt = str(malchecks[check]['regex']).format(target)
                        for line in data['content'].split('\n'):
                            if (targetType == "domain" and re.match(rxDom, line, re.IGNORECASE)) or \
                                    re.match(rxTgt, line, re.IGNORECASE):
                                self.sf.debug(target + "/" + targetDom + " found in " + check + " list.")
                                return url
                    except BaseException as e:
                        self.sf.debug("Error encountered parsing 2: " + str(e))
                        continue

        return None
Example #36
def is_public_ip(ip):
    ip = IPAddress(ip)
    if (ip in IPNetwork("10.0.0.0/8") or ip in IPNetwork("172.16.0.0/12")
            or ip in IPNetwork("192.168.0.0/16")):
        return False
    return True
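
A quick usage sketch with hypothetical inputs; 192.0.2.1 sits outside the three RFC 1918 blocks checked above, so it is reported as public:

print(is_public_ip('10.1.2.3'))    # False (RFC 1918)
print(is_public_ip('192.0.2.1'))   # True
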
Example #37
def run_bgpstream_beta_bmp(prefixes_file=None):
    """
    Retrieve all elements related to a list of prefixes
    https://bgpstream.caida.org/docs/api/pybgpstream/_pybgpstream.html

    :param prefixes_file: <str> input prefix json

    :return: -
    """

    prefixes = load_json(prefixes_file)
    assert prefixes is not None

    # create a new bgpstream instance
    stream = _pybgpstream.BGPStream()

    # set BMP data interface
    stream.set_data_interface("beta-bmp-stream")

    # filter prefixes
    for prefix in prefixes:
        stream.add_filter("prefix", prefix)

    # filter record type
    stream.add_filter("record-type", "updates")

    # set live mode
    stream.set_live_mode()

    # start the stream
    stream.start()

    with Connection(RABBITMQ_URI) as connection:
        exchange = Exchange("bgp-update",
                            channel=connection,
                            type="direct",
                            durable=False)
        exchange.declare()
        producer = Producer(connection)
        validator = mformat_validator()
        while True:
            # get next record
            try:
                rec = stream.get_next_record()
            except BaseException:
                continue
            if (rec.status != "valid") or (rec.type != "update"):
                continue

            # get next element
            try:
                elem = rec.get_next_elem()
            except BaseException:
                continue

            while elem:
                if elem.type in {"A", "W"}:
                    redis.set(
                        "betabmp_seen_bgp_update",
                        "1",
                        ex=int(
                            os.getenv(
                                "MON_TIMEOUT_LAST_BGP_UPDATE",
                                DEFAULT_MON_TIMEOUT_LAST_BGP_UPDATE,
                            )),
                    )
                    this_prefix = str(elem.fields["prefix"])
                    service = "betabmp|{}|{}".format(str(rec.project),
                                                     str(rec.collector))
                    type_ = elem.type
                    if type_ == "A":
                        as_path = elem.fields["as-path"].split(" ")
                        communities = [{
                            "asn": int(comm.split(":")[0]),
                            "value": int(comm.split(":")[1]),
                        } for comm in elem.fields["communities"]]
                    else:
                        as_path = []
                        communities = []
                    timestamp = float(rec.time)
                    peer_asn = elem.peer_asn

                    for prefix in prefixes:
                        base_ip, mask_length = this_prefix.split("/")
                        our_prefix = IPNetwork(prefix)
                        if (IPAddress(base_ip) in our_prefix
                                and int(mask_length) >= our_prefix.prefixlen):
                            msg = {
                                "type": type_,
                                "timestamp": timestamp,
                                "path": as_path,
                                "service": service,
                                "communities": communities,
                                "prefix": this_prefix,
                                "peer_asn": peer_asn,
                            }
                            if validator.validate(msg):
                                msgs = normalize_msg_path(msg)
                                for msg in msgs:
                                    key_generator(msg)
                                    log.debug(msg)
                                    producer.publish(
                                        msg,
                                        exchange=exchange,
                                        routing_key="update",
                                        serializer="json",
                                    )
                            else:
                                log.warning(
                                    "Invalid format message: {}".format(msg))
                try:
                    elem = rec.get_next_elem()
                except BaseException:
                    continue
Example #38
File: website.py Project: UltrosBot/Ultros
    def callback(self, result, url, context, session):
        response = result[0]
        content = result[1]

        self.plugin.logger.trace(
            "Headers: {0}", list(response.headers)
        )

        self.plugin.logger.trace("HTTP code: {0}", response.status_code)

        new_url = urlparse.urlparse(response.url)

        if self.url_can_resolve(url):
            try:
                ip = yield self.resolver.get_host_by_name(new_url.hostname)
                ip = IPAddress(ip)
            except Exception:
                # context["event"].target.respond(
                #     u'[Error] Failed to handle URL: {}'.format(
                #         url.to_string()
                #     )
                # )

                self.plugin.logger.exception("Error while checking DNS")
                returnValue(STOP_HANDLING)
                return

            if ip.is_loopback() or ip.is_private() or ip.is_link_local() \
                    or ip.is_multicast():
                self.plugin.logger.warn(
                    "Prevented connection to private/internal address"
                )

                returnValue(STOP_HANDLING)
                return

        if content is None:
            self.plugin.logger.debug("No content returned")
            return

        soup = BeautifulSoup(content)

        if soup.title and soup.title.text:
            title = soup.title.text.strip()
            title = re.sub("[\n\s]+", " ", title)
            title = to_unicode(title)

            title_limit = self.urls_plugin.config.get("max_title_length", 150)

            if len(title) > title_limit:
                title = title[:title_limit - 15] + u"... (truncated)"

            if response.status_code == requests.codes.ok:
                context["event"].target.respond(
                    u'"{0}" at {1}'.format(
                        title, new_url.hostname
                    )
                )
            else:
                context["event"].target.respond(
                    u'[HTTP {0}] "{1}" at {2}'.format(
                        response.status_code,
                        title, new_url.hostname
                    )
                )

        else:
            if response.status_code != requests.codes.ok:
                context["event"].target.respond(
                    u'HTTP Error {0}: "{1}" at {2}'.format(
                        response.status_code,
                        STATUS_CODES.get(response.status_code, "Unknown"),
                        new_url.hostname
                    )
                )
            else:
                self.plugin.logger.debug("No title")

        self.save_session(session)
Example #39
    def net_check_subnet(self, req_vn_dict):
        ipam_refs = req_vn_dict.get('network_ipam_refs', [])
        for ipam_ref in ipam_refs:
            vnsn_data = ipam_ref['attr']
            ipam_subnets = vnsn_data['ipam_subnets']
            for ipam_subnet in ipam_subnets:
                subnet_dict = copy.deepcopy(ipam_subnet['subnet'])
                prefix = subnet_dict['ip_prefix']
                prefix_len = subnet_dict['ip_prefix_len']
                network = IPNetwork('%s/%s' % (prefix, prefix_len))
                subnet_name = subnet_dict['ip_prefix'] + '/' + str(
                    subnet_dict['ip_prefix_len'])

                # check subnet-uuid
                ipam_cfg_subnet_uuid = ipam_subnet.get('subnet_uuid', None)
                try:
                    if ipam_cfg_subnet_uuid:
                        subnet_uuid = uuid.UUID(ipam_cfg_subnet_uuid)
                except ValueError:
                    err_msg = "Invalid subnet-uuid %s in subnet:%s" \
                        %(ipam_cfg_subnet_uuid, subnet_name)
                    return False, err_msg

                # check allocation-pool
                alloc_pools = ipam_subnet.get('allocation_pools', None)
                for pool in alloc_pools or []:
                    try:
                        iprange = IPRange(pool['start'], pool['end'])
                    except AddrFormatError:
                        err_msg = "Invalid allocation Pool start:%s, end:%s in subnet:%s" \
                            %(pool['start'], pool['end'], subnet_name)
                        return False, err_msg
                    if iprange not in network:
                        err_msg = "allocation pool start:%s, end:%s out of cidr:%s" \
                            %(pool['start'], pool['end'], subnet_name)
                        return False, err_msg

                # check gw
                gw = ipam_subnet.get('default_gateway', None)
                if gw is not None:
                    try:
                        gw_ip = IPAddress(gw)
                    except AddrFormatError:
                        err_msg = "Invalid gateway Ip address:%s" \
                            %(gw)
                        return False, err_msg
                    if (gw_ip != IPAddress('0.0.0.0') and
                            gw_ip != IPAddress('::') and
                            (gw_ip < IPAddress(network.first + 1) or
                                gw_ip > IPAddress(network.last - 1))):
                        err_msg = "gateway Ip %s out of cidr: %s" \
                            %(gw, subnet_name)
                        return False, err_msg

                # check service address
                service_address = ipam_subnet.get('dns_server_address', None)
                if service_address is not None:
                    try:
                        service_node_address = IPAddress(service_address)
                    except AddrFormatError:
                        err_msg = "Invalid Dns Server Ip address:%s" \
                            %(service_address)
                        return False, err_msg
        return True, ""
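
The checks above rely on netaddr containment and ordering; a minimal sketch of the same pool-in-CIDR and gateway-in-range tests, with made-up addresses:

from netaddr import IPAddress, IPNetwork, IPRange

network = IPNetwork('10.0.0.0/24')
pool = IPRange('10.0.0.10', '10.0.0.20')
print(pool in network)  # True: the allocation pool fits inside the CIDR
gw = IPAddress('10.0.0.254')
# the gateway must fall between the first and last usable addresses of the CIDR
print(IPAddress(network.first + 1) <= gw <= IPAddress(network.last - 1))  # True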
Example #40
0
 def ip_reset_in_use(self, ipaddr):
     ip = IPAddress(ipaddr)
     addr = int(ip)
     return self._db_conn.subnet_reset_in_use(self._name, addr)
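
The method hands the DB layer an integer index rather than a string; a quick illustration of the netaddr conversion it relies on:

from netaddr import IPAddress

print(int(IPAddress('10.0.0.5')))  # 167772165, the integer passed to the DB layer
print(IPAddress(167772165))        # 10.0.0.5, so the conversion is reversible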
Example #41
0
    def handleEvent(self, event):
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data
        eventDataHash = self.sf.hashstring(eventData)
        addrs = None
        parentEvent = event

        self.sf.debug(f"Received event, {eventName}, from {srcModuleName}")

        if eventDataHash in self.events:
            return None

        self.events[eventDataHash] = True

        try:
            ip = IPAddress(eventData)
        except Exception:
            self.sf.error(f"Invalid IP address received: {eventData}")
            return None

        try:
            minip = IPAddress(int(ip) - self.opts['lookasidecount'])
            maxip = IPAddress(int(ip) + self.opts['lookasidecount'])
        except Exception:
            self.sf.error(f"Received an invalid IP address: {eventData}")
            return None

        self.sf.debug("Lookaside max: " + str(maxip) + ", min: " + str(minip))
        s = int(minip)
        c = int(maxip)

        while s <= c:
            sip = str(IPAddress(s))
            self.sf.debug("Attempting look-aside lookup of: " + sip)
            if self.checkForStop():
                return None

            if sip in self.hostresults or sip == eventData:
                s += 1
                continue

            addrs = self.sf.resolveIP(sip)
            if not addrs:
                self.sf.debug("Look-aside resolve for " + sip + " failed.")
                s += 1
                continue

            # Report addresses that resolve to hostnames on the same
            # domain or sub-domain as the target.
            if self.getTarget().matches(sip):
                affil = False
            else:
                affil = True
                for a in addrs:
                    if self.getTarget().matches(a):
                        affil = False

            # Generate the event for the look-aside IP, but don't let it re-trigger
            # this module by adding it to self.events first.
            self.events[sip] = True
            ev = self.processHost(sip, parentEvent, affil)

            for addr in addrs:
                if self.checkForStop():
                    return None

                if addr == sip:
                    continue
                if self.sf.validIP(addr):
                    parent = parentEvent
                else:
                    # Hostnames from the IP need to be linked to the IP
                    parent = ev

                if self.getTarget().matches(addr):
                    # Generate an event for the IP, then
                    # let the handling by this module take
                    # care of follow-up processing.
                    self.processHost(addr, parent, False)
                else:
                    self.processHost(addr, parent, True)
            s += 1
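
The look-aside window is plain integer arithmetic on IPAddress values; a small standalone sketch (a look-aside count of 2 is an assumption):

from netaddr import IPAddress

ip = IPAddress('192.0.2.10')
lookasidecount = 2
minip = IPAddress(int(ip) - lookasidecount)
maxip = IPAddress(int(ip) + lookasidecount)
# walk the window one address at a time, as the loop above does
print([str(IPAddress(i)) for i in range(int(minip), int(maxip) + 1)])
# ['192.0.2.8', '192.0.2.9', '192.0.2.10', '192.0.2.11', '192.0.2.12']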
Example #42
0
File: ipgeo.py Project: xiaobo-linux/We
def ipgeo(ip):
    address = IPAddress(ip)

    for network, name in zip(ip_network, ip_name):
        if address in network:
            return name
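
ipgeo() assumes module-level parallel lists ip_network and ip_name; a self-contained sketch with hypothetical data showing how the lookup behaves:

from netaddr import IPAddress, IPNetwork

# hypothetical stand-ins for the module-level ip_network / ip_name lists
ip_network = [IPNetwork('203.0.113.0/24'), IPNetwork('198.51.100.0/24')]
ip_name = ['region-a', 'region-b']

def ipgeo(ip):
    address = IPAddress(ip)
    for network, name in zip(ip_network, ip_name):
        if address in network:
            return name

print(ipgeo('198.51.100.7'))  # region-b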
Example #43
0
    def __init__(self, name, prefix, prefix_len,
                 gw=None, service_address=None, enable_dhcp=True,
                 dns_nameservers=None,
                 alloc_pool_list=None,
                 addr_from_start=False,
                 should_persist=True,
                 ip_alloc_unit=1):

        network = IPNetwork('%s/%s' % (prefix, prefix_len))

        # check allocation-pool
        for ip_pool in alloc_pool_list or []:
            try:
                start_ip = IPAddress(ip_pool['start'])
                end_ip = IPAddress(ip_pool['end'])
            except AddrFormatError:
                raise AddrMgmtInvalidIpAddr(name, ip_pool)
            if (start_ip not in network or end_ip not in network):
                raise AddrMgmtOutofBoundAllocPool(name, ip_pool)
            if (end_ip < start_ip):
                raise AddrMgmtInvalidAllocPool(name, ip_pool)

        # check gw
        if gw:
            try:
                gw_ip = IPAddress(gw)
            except AddrFormatError:
                raise AddrMgmtInvalidGatewayIp(name, gw_ip)

        else:
            # reserve a gateway ip in subnet
            if addr_from_start:
                gw_ip = IPAddress(network.first + 1)
            else:
                gw_ip = IPAddress(network.last - 1)

        # check service_address
        if service_address:
            try:
                service_node_address = IPAddress(service_address)
            except AddrFormatError:
                raise AddrMgmtInvalidServiceNodeIp(name, service_node_address)

        else:
            # reserve a service address ip in subnet
            if addr_from_start:
                service_node_address = IPAddress(network.first + 2)
            else:
                service_node_address = IPAddress(network.last - 2)

        # check dns_nameservers
        for nameserver in dns_nameservers or []:
            try:
                ip_addr = IPAddress(nameserver)
            except AddrFormatError:
                raise AddrMgmtInvalidDnsServer(name, nameserver)

        # check allocation-unit
        # alloc-unit should be power of 2
        if (ip_alloc_unit & (ip_alloc_unit-1)):
            raise AddrMgmtAllocUnitInvalid(name, prefix+'/'+prefix_len,
                                           ip_alloc_unit)

        # if allocation-pool is not specified, create one with entire cidr
        no_alloc_pool = False
        if not alloc_pool_list:
            alloc_pool_list = [{'start': str(IPAddress(network.first)),
                                'end': str(IPAddress(network.last-1))}]
            no_alloc_pool = True


        # need alloc_pool_list converted to integers for use in the Allocator
        alloc_int_list = list()
        # store the integer value of each ip address in the allocation list
        for alloc_pool in alloc_pool_list:
            alloc_int = {'start': int(IPAddress(alloc_pool['start'])),
                         'end': int(IPAddress(alloc_pool['end']))}
            alloc_int_list.append(alloc_int)

        # check alloc_pool starts at integer multiple of alloc_unit
        if ip_alloc_unit != 1:
            for alloc_int in alloc_int_list:
                if alloc_int['start'] % ip_alloc_unit:
                    raise AddrMgmtAllocUnitInvalid(name, prefix+'/'+prefix_len,
                                                   ip_alloc_unit)

        # go through each alloc_pool and validate allocation pool
        # given ip_alloc_unit
        net_ip = IPAddress(network.first)
        bc_ip = IPAddress(network.last)
        for alloc_pool in alloc_pool_list:
            if no_alloc_pool:
                start_ip = IPAddress(network.first)
                end_ip = IPAddress(network.last)
            else:
                start_ip = IPAddress(alloc_pool['start'])
                end_ip = IPAddress(alloc_pool['end'])

            alloc_pool_range = int(end_ip) - int(start_ip) + 1
            # each alloc-pool range should be integer multiple of ip_alloc_unit
            if (alloc_pool_range < ip_alloc_unit):
                raise AddrMgmtAllocUnitInvalid(name, prefix+'/'+prefix_len,
                                               ip_alloc_unit)
            if (alloc_pool_range % ip_alloc_unit):
                raise AddrMgmtAllocUnitInvalid(name, prefix+'/'+prefix_len,
                                               ip_alloc_unit)

            block_alloc_unit = 0
            if (net_ip == start_ip):
                block_alloc_unit += 1
                if (bc_ip == end_ip):
                    if (int(end_ip) - int(start_ip) >= ip_alloc_unit):
                        block_alloc_unit += 1
            else:
                if (bc_ip == end_ip):
                    block_alloc_unit += 1

            if (gw_ip >= start_ip and gw_ip <= end_ip):
                #gw_ip is part of this alloc_pool, block another alloc_unit
                # only if gw_ip is not a part of already counted block units.
                if ((int(gw_ip) - int(start_ip) >= ip_alloc_unit) and 
                    (int(end_ip) - int(gw_ip) >= ip_alloc_unit)):
                    block_alloc_unit += 1

            if (service_node_address >=start_ip and
                service_node_address <= end_ip):
                #service_node_address is part of this alloc_pool,
                #block another alloc_unit only if service_node_address is not
                #part of already counted block units.
                if ((int(service_node_address) -
                     int(start_ip) >= ip_alloc_unit) and
                    (int(end_ip) -
                     int(service_node_address) >= ip_alloc_unit) and
                    (abs(int(gw_ip) -
                         int(service_node_address)) > ip_alloc_unit)):
                    block_alloc_unit += 1

            # each alloc-pool should allow at least block_alloc_unit+1
            # possible allocations
            if (alloc_pool_range/ip_alloc_unit) <= block_alloc_unit:
                raise AddrMgmtAllocUnitInvalid(name, prefix+'/'+prefix_len,
                                               ip_alloc_unit)

        # Exclude host and broadcast
        exclude = [IPAddress(network.first), network.broadcast]

        # check that the size of the subnet or of individual alloc_pools is a multiple of alloc_unit

        # exclude gw_ip, service_node_address if they are within
        # allocation-pool
        for alloc_int in alloc_int_list:
            if alloc_int['start'] <= int(gw_ip) <= alloc_int['end']:
                exclude.append(gw_ip)
            if (alloc_int['start'] <= int(service_node_address)
                    <= alloc_int['end']):
                exclude.append(service_node_address)
        # ip address allocator will be per alloc-unit
        self._db_conn.subnet_create_allocator(name, alloc_int_list,
                                              addr_from_start,
                                              should_persist,
                                              network.first,
                                              network.size,
                                              ip_alloc_unit)

        # reserve excluded addresses
        for addr in exclude:
            if should_persist:
                self._db_conn.subnet_reserve_req(name, int(addr)/ip_alloc_unit,
                                                 'system-reserved')
            else:
                self._db_conn.subnet_set_in_use(name, int(addr))

        self._name = name
        self._network = network
        self._version = network.version
        self._exclude = exclude
        self.gw_ip = gw_ip
        self.dns_server_address = service_node_address
        self._alloc_pool_list = alloc_pool_list
        self.enable_dhcp = enable_dhcp
        self.dns_nameservers = dns_nameservers
        self.alloc_unit = ip_alloc_unit
        self._prefix = prefix
        self._prefix_len = prefix_len
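
The allocation-unit validation uses the bit trick n & (n - 1) to require a power of two; a minimal sketch of why it works:

def is_power_of_two(n):
    # n & (n - 1) clears the lowest set bit; the result is zero only
    # when n has a single bit set, i.e. n is a positive power of two
    return n > 0 and not (n & (n - 1))

print([u for u in range(1, 17) if is_power_of_two(u)])  # [1, 2, 4, 8, 16]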
Example #44
0
def nodes_by_interface(interfaces_label_map,
                       include_filter=None,
                       preconfigured=True):
    """Determines the set of nodes that match the specified
    LabeledConstraintMap (which must be a map of interface constraints.)

    Returns a dictionary in the format:
    {
        <label1>: {
            <node1>: [<interface1>, <interface2>, ...]
            <node2>: ...
            ...
        }
        <label2>: ...
    }

    :param interfaces_label_map: LabeledConstraintMap
    :param include_filter: A dictionary suitable for passing into the Django
        QuerySet filter() arguments, representing the set of initial interfaces
        to filter.
    :param preconfigured: If True, assumes that the specified constraint values
        have already been configured. If False, also considers nodes whose
        VLANs (but not necessarily subnets or IP addresses) match, so that
        the node can be configured per the constraints post-allocation.
    :return: NodesByInterfaceResult object
    """
    node_ids = None
    label_map = {}
    allocated_ips = {}
    ip_modes = {}
    for label in interfaces_label_map:
        constraints = interfaces_label_map[label]
        if not preconfigured:
            # This code path is used for pods, where the machine doesn't yet
            # exist, but will be created based on the constraints.
            if 'ip' in constraints:
                vlan_constraints = constraints.pop('vlan', [])
                ip_constraints = constraints.pop('ip')
                for ip in ip_constraints:
                    allocations_by_label = allocated_ips.pop(label, [])
                    allocations_by_label.append(str(IPAddress(ip)))
                    allocated_ips[label] = allocations_by_label
                    subnet = Subnet.objects.get_best_subnet_for_ip(ip)
                    # Convert the specified IP address constraint into a VLAN
                    # constraint. At this point, we don't care if the IP
                    # address matches. We only care that we have allocated
                    # an IP address on a VLAN that will exist on the composed
                    # machine.
                    vlan_constraints.append('id:%d' % subnet.vlan.id)
                constraints['vlan'] = vlan_constraints
            if 'mode' in constraints:
                mode_constraints = constraints.pop('mode')
                for mode in mode_constraints:
                    # This will be used later when a subnet is selected.
                    ip_modes[label] = mode
        if node_ids is None:
            # The first time through the filter, build the list
            # of candidate nodes.
            node_ids, node_map = Interface.objects.get_matching_node_map(
                constraints, include_filter=include_filter)
            label_map[label] = node_map
        else:
            # For subsequent labels, only match nodes that already matched a
            # preceding label. Use the set intersection operator to do this,
            # because that will yield more complete data in the label_map.
            # (which is less efficient, but may be needed for troubleshooting.)
            # If a more efficient approach is desired, this could be changed
            # to filter the nodes starting from an 'id__in' filter using the
            # current 'node_ids' set.
            new_node_ids, node_map = Interface.objects.get_matching_node_map(
                constraints, include_filter=include_filter)
            label_map[label] = node_map
            node_ids &= new_node_ids
    return NodesByInterfaceResult(node_ids=node_ids,
                                  label_map=label_map,
                                  allocated_ips=allocated_ips,
                                  ip_modes=ip_modes)
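
The docstring describes narrowing candidate nodes label by label with set intersection; a tiny sketch of that pattern with hypothetical node ids:

# hypothetical per-label match results standing in for get_matching_node_map()
matches_per_label = {
    'storage': {1, 2, 3},
    'public': {2, 3, 4},
}
node_ids = None
for label, ids in matches_per_label.items():
    # the first label seeds the candidate set; later labels only narrow it
    node_ids = set(ids) if node_ids is None else node_ids & ids
print(node_ids)  # {2, 3}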
Example #45
0
    def ip_alloc_req(self, vn_fq_name, vn_dict=None, sub=None, asked_ip_addr=None,
                     asked_ip_version=4, alloc_id=None):
        vn_fq_name_str = ':'.join(vn_fq_name)
        subnet_dicts = self._get_subnet_dicts(vn_fq_name, vn_dict)

        if not subnet_dicts:
            raise AddrMgmtSubnetUndefined(vn_fq_name_str)

        current_count = 0
        subnet_count = len(subnet_dicts)
        for subnet_name in subnet_dicts:
            current_count += 1
            if sub and sub != subnet_name:
                continue

            # create subnet_obj internally if it was created by some other
            # api-server before
            try:
                subnet_obj = self._subnet_objs[vn_fq_name_str][subnet_name]
            except KeyError:
                if vn_fq_name_str not in self._subnet_objs:
                    self._subnet_objs[vn_fq_name_str] = {}

                subnet_dict = subnet_dicts[subnet_name]
                subnet_obj = Subnet('%s:%s' % (vn_fq_name_str,
                                               subnet_name),
                                    subnet_dict['ip_prefix'],
                                    subnet_dict['ip_prefix_len'],
                                    gw=subnet_dict['gw'],
                                    service_address=subnet_dict['dns_server_address'],
                                    enable_dhcp=subnet_dict['enable_dhcp'],
                                    dns_nameservers=subnet_dict['dns_nameservers'],
                                    alloc_pool_list=subnet_dict['allocation_pools'],
                                    addr_from_start = subnet_dict['addr_start'],
                                    should_persist=False,
                                    ip_alloc_unit=subnet_dict['alloc_unit'])
                self._subnet_objs[vn_fq_name_str][subnet_name] = subnet_obj

            if asked_ip_version and asked_ip_version != subnet_obj.get_version():
                continue
            if asked_ip_addr == str(subnet_obj.gw_ip):
                return asked_ip_addr
            if asked_ip_addr == str(subnet_obj.dns_server_address):
                return asked_ip_addr
            if asked_ip_addr and not subnet_obj.ip_belongs(asked_ip_addr):
                continue

            # if the user requests an ip-addr and it can't be reserved because an
            # existing object (iip/fip) is using it, return an exception with
            # the info; the client can determine whether it's an error or not
            if asked_ip_addr:
                if (int(IPAddress(asked_ip_addr)) % subnet_obj.alloc_unit):
                    raise AddrMgmtAllocUnitInvalid(
                        subnet_obj._name,
                        subnet_obj._prefix+'/'+subnet_obj._prefix_len,
                        subnet_obj.alloc_unit)

                return subnet_obj.ip_reserve(ipaddr=asked_ip_addr,
                                             value=alloc_id)

            try:
                ip_addr = subnet_obj.ip_alloc(ipaddr=None,
                                              value=alloc_id)
            except cfgm_common.exceptions.ResourceExhaustionError as e:
                # ignore the exception if this is not the last subnet
                self.config_log("In ip_alloc_req: %s" %(str(e)),
                                level=SandeshLevel.SYS_DEBUG)
                if current_count < subnet_count:
                    continue
                else:
                    raise AddrMgmtSubnetExhausted(vn_fq_name, 'all')

            if ip_addr is not None or sub:
                return ip_addr

        raise AddrMgmtSubnetExhausted(vn_fq_name, 'all')
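
A requested address is rejected unless it sits on an allocation-unit boundary; a quick sketch of that modulo check (an alloc_unit of 4 is an assumption):

from netaddr import IPAddress

alloc_unit = 4  # hypothetical allocation unit: addresses are handed out in blocks of 4
print(int(IPAddress('10.0.0.8')) % alloc_unit == 0)  # True: on an alloc-unit boundary
print(int(IPAddress('10.0.0.9')) % alloc_unit == 0)  # False: would raise AddrMgmtAllocUnitInvalid above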
Example #46
0
    def resourceList(self, id, target, targetType):
        targetDom = ''
        # Get the base domain if we're supplied a domain
        if targetType == "domain":
            targetDom = self.sf.hostDomain(target, self.opts['_internettlds'])

        for check in list(malchecks.keys()):
            cid = malchecks[check]['id']
            if id == cid and malchecks[check]['type'] == "list":
                data = dict()
                url = malchecks[check]['url']
                data['content'] = self.sf.cacheGet(
                    "sfmal_" + cid, self.opts.get('cacheperiod', 0))
                if data['content'] is None:
                    data = self.sf.fetchUrl(url,
                                            timeout=self.opts['_fetchtimeout'],
                                            useragent=self.opts['_useragent'])
                    if data['content'] is None:
                        self.sf.error("Unable to fetch " + url, False)
                        return None
                    else:
                        self.sf.cachePut("sfmal_" + cid, data['content'])

                # If we're looking at netblocks
                if targetType == "netblock":
                    iplist = list()
                    # Get the regex, replace {0} with an IP address matcher to
                    # build a list of IPs.
                    # Cycle through each IP and check if it's in the netblock.
                    if 'regex' in malchecks[check]:
                        rx = malchecks[check]['regex'].replace(
                            "{0}", "(\d+\.\d+\.\d+\.\d+)")
                        pat = re.compile(rx, re.IGNORECASE)
                        self.sf.debug("New regex for " + check + ": " + rx)
                        for line in data['content'].split('\n'):
                            grp = re.findall(pat, line)
                            if len(grp) > 0:
                                #self.sf.debug("Adding " + grp[0] + " to list.")
                                iplist.append(grp[0])
                    else:
                        iplist = data['content'].split('\n')

                    for ip in iplist:
                        if len(ip) < 8 or ip.startswith("#"):
                            continue
                        ip = ip.strip()

                        try:
                            if IPAddress(ip) in IPNetwork(target):
                                self.sf.debug(
                                    ip + " found within netblock/subnet " +
                                    target + " in " + check)
                                return url
                        except Exception as e:
                            self.sf.debug("Error encountered parsing: " +
                                          str(e))
                            continue

                    return None

                # If we're looking at hostnames/domains/IPs
                if 'regex' not in malchecks[check]:
                    for line in data['content'].split('\n'):
                        if line == target or (targetType == "domain"
                                              and line == targetDom):
                            self.sf.debug(target + "/" + targetDom +
                                          " found in " + check + " list.")
                            return url
                else:
                    # Check for the domain and the hostname
                    try:
                        rxDom = str(
                            malchecks[check]['regex']).format(targetDom)
                        rxTgt = str(malchecks[check]['regex']).format(target)
                        for line in data['content'].split('\n'):
                            if (targetType == "domain" and re.match(rxDom, line, re.IGNORECASE)) or \
                                    re.match(rxTgt, line, re.IGNORECASE):
                                self.sf.debug(target + "/" + targetDom +
                                              " found in " + check + " list.")
                                return url
                    except BaseException as e:
                        self.sf.debug("Error encountered parsing 2: " + str(e))
                        continue

        return None
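
The netblock branch builds its matcher by substituting an IPv4 pattern for the "{0}" placeholder; a small sketch with a hypothetical blocklist regex template:

import re

# hypothetical blocklist regex template; "{0}" is the placeholder the module fills in
template = r'^{0}\s*#'
rx = template.replace("{0}", r"(\d+\.\d+\.\d+\.\d+)")
print(re.findall(rx, "198.51.100.25  # known bad host"))  # ['198.51.100.25']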
Example #47
0
def warn_loopback(ip):
    """Warn if the given IP address is in the loopback network."""
    if IPAddress(ip).is_loopback():
        logger.warning(WARNING_MESSAGE % ip)
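
A quick usage sketch of the netaddr check behind warn_loopback():

from netaddr import IPAddress

print(IPAddress('127.0.0.1').is_loopback())  # True: would trigger the warning
print(IPAddress('192.0.2.1').is_loopback())  # False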
Example #48
0
 def is_ip_allocated(self, ipaddr):
     ip = IPAddress(ipaddr)
     addr = int(ip)
     return self._db_conn.subnet_is_addr_allocated(self._name, addr)
Example #49
0
    def cli(self, output=None):
        if output is None:
            out = self.device.execute(self.cli_command)
        else:
            out = output
        # Init vars
        ret_dict = {}

        for line in out.splitlines():
            line = line.strip()

            # Routing Protocol OSPF 1
            p1 = re.compile(
                r'^Routing +Protocol +(?P<pro>OSPF|OSPFv3) +(?P<pid>(\S+))$')
            m = p1.match(line)
            if m:
                instance = str(m.groupdict()['pid'])
                if 'protocols' not in ret_dict:
                    ret_dict['protocols'] = {}
                pro = m.groupdict()['pro'].lower()
                if pro not in ret_dict['protocols']:
                    ret_dict['protocols'][pro] = {}
                if 'vrf' not in ret_dict['protocols'][pro]:
                    ret_dict['protocols'][pro]['vrf'] = {}
                if 'default' not in ret_dict['protocols'][pro]['vrf']:
                    ret_dict['protocols'][pro]['vrf']['default'] = {}
                if 'address_family' not in ret_dict['protocols'][pro]['vrf']\
                        ['default']:
                    ret_dict['protocols'][pro]['vrf']['default']\
                        ['address_family'] = {}
                if 'ipv4' not in ret_dict['protocols'][pro]['vrf']['default']\
                        ['address_family']:
                    ret_dict['protocols'][pro]['vrf']['default']\
                        ['address_family']['ipv4'] = {}
                if 'instance' not in ret_dict['protocols'][pro]['vrf']\
                        ['default']['address_family']['ipv4']:
                    ret_dict['protocols'][pro]['vrf']['default']\
                        ['address_family']['ipv4']['instance'] = {}
                if instance not in ret_dict['protocols'][pro]['vrf']\
                        ['default']['address_family']['ipv4']['instance']:
                    ret_dict['protocols'][pro]['vrf']['default']\
                        ['address_family']['ipv4']['instance'][instance] = {}
                # Set ospf_dict
                ospf_dict = ret_dict['protocols'][pro]['vrf']['default']\
                        ['address_family']['ipv4']['instance'][instance]
                continue

            # Router Id: 3.3.3.3
            p2 = re.compile(r'^Router +Id: +(?P<router_id>(\S+))$')
            m = p2.match(line)
            if m:
                ospf_dict['router_id'] = str(m.groupdict()['router_id'])
                continue

            # Distance: 110
            p3_1 = re.compile(r'^Distance: +(?P<distance>(\d+))$')
            m = p3_1.match(line)
            if m:
                try:
                    ospf_dict
                except Exception:
                    continue
                if 'preference' not in ospf_dict:
                    ospf_dict['preference'] = {}
                if 'single_value' not in ospf_dict['preference']:
                    ospf_dict['preference']['single_value'] = {}
                ospf_dict['preference']['single_value']['all'] = \
                    int(m.groupdict()['distance'])
                continue

            # Distance: IntraArea 112 InterArea 113 External/NSSA 114
            p3_2 = re.compile(r'^Distance: +IntraArea +(?P<intra>(\d+))'
                              ' +InterArea +(?P<inter>(\d+)) +External\/NSSA'
                              ' +(?P<external>(\d+))$')
            m = p3_2.match(line)
            if m:
                if 'preference' not in ospf_dict:
                    ospf_dict['preference'] = {}
                if 'multi_values' not in ospf_dict['preference']:
                    ospf_dict['preference']['multi_values'] = {}
                if 'granularity' not in ospf_dict['preference'][
                        'multi_values']:
                    ospf_dict['preference']['multi_values']['granularity'] = {}
                if 'detail' not in ospf_dict['preference']['multi_values']\
                        ['granularity']:
                    ospf_dict['preference']['multi_values']['granularity']\
                        ['detail'] = {}
                ospf_dict['preference']['multi_values']['granularity']\
                        ['detail']['intra_area'] = int(m.groupdict()['intra'])
                ospf_dict['preference']['multi_values']['granularity']\
                        ['detail']['inter_area'] = int(m.groupdict()['inter'])
                ospf_dict['preference']['multi_values']['external'] = \
                    int(m.groupdict()['external'])
                continue

            # Non-Stop Forwarding: Disabled
            p4 = re.compile(r'^Non-Stop +Forwarding:'
                            ' +(?P<nsf>(Disabled|Enabled))$')
            m = p4.match(line)
            if m:
                if 'Disabled' in m.groupdict()['nsf']:
                    ospf_dict['nsf'] = False
                else:
                    ospf_dict['nsf'] = True
                    continue

            # Redistribution:
            #   connected
            #   connected with metric 10
            #   static
            #   static with metric 10
            p14 = re.compile(r'^(?P<type>(connected|static))(?: +with +metric'
                             ' +(?P<metric>(\d+)))?$')
            m = p14.match(line)
            if m:
                the_type = str(m.groupdict()['type'])
                if 'redistribution' not in ospf_dict:
                    ospf_dict['redistribution'] = {}
                if the_type not in ospf_dict['redistribution']:
                    ospf_dict['redistribution'][the_type] = {}
                ospf_dict['redistribution'][the_type]['enabled'] = True
                if m.groupdict()['metric']:
                    ospf_dict['redistribution'][the_type]['metric'] = \
                        int(m.groupdict()['metric'])
                    continue

            # Redistribution:
            #   bgp 100 with metric 111
            #   isis 10 with metric 3333
            p15 = re.compile(r'^(?P<prot>(bgp|isis)) +(?P<pid>(\d+))(?: +with'
                             ' +metric +(?P<metric>(\d+)))?$')
            m = p15.match(line)
            if m:
                prot = str(m.groupdict()['prot'])
                if prot not in ospf_dict['redistribution']:
                    ospf_dict['redistribution'][prot] = {}
                if prot == 'bgp':
                    ospf_dict['redistribution'][prot]['bgp_id'] = \
                        int(m.groupdict()['pid'])
                else:
                    ospf_dict['redistribution'][prot]['isis_pid'] = \
                        str(m.groupdict()['pid'])
                if m.groupdict()['metric']:
                    ospf_dict['redistribution'][prot]['metric'] = \
                        int(m.groupdict()['metric'])
                continue

            # Area 0
            p5 = re.compile(r'^Area +(?P<area>(\S+))$')
            m = p5.match(line)
            if m:
                area = str(IPAddress(str(m.groupdict()['area'])))
                area_interfaces = []
                if 'areas' not in ospf_dict:
                    ospf_dict['areas'] = {}
                if area not in ospf_dict['areas']:
                    ospf_dict['areas'][area] = {}
                ospf_dict['areas'][area]['interfaces'] = area_interfaces
                continue

            # MPLS/TE enabled
            p6 = re.compile(r'^MPLS\/TE +(?P<te>(enabled|disabled))$')
            m = p6.match(line)
            if m:
                if 'mpls' not in ospf_dict['areas'][area]:
                    ospf_dict['areas'][area]['mpls'] = {}
                if 'te' not in ospf_dict['areas'][area]['mpls']:
                    ospf_dict['areas'][area]['mpls']['te'] = {}
                if 'enabled' in m.groupdict()['te']:
                    ospf_dict['areas'][area]['mpls']['te']['enable'] = True
                else:
                    ospf_dict['areas'][area]['mpls']['te']['enable'] = False
                    continue

            # Loopback0
            # GigabitEthernet0/0/0/0
            # GigabitEthernet0/0/0/2
            p6 = re.compile(r'^(?P<intf>(Lo|Gi)[\w\/\.\-]+)$')
            m = p6.match(line)
            if m:
                area_interfaces.append(str(m.groupdict()['intf']))
                ospf_dict['areas'][area]['interfaces'] = area_interfaces
                continue

            # Routing Protocol "BGP 100"
            p8 = re.compile(
                r'^Routing +Protocol +\"BGP +(?P<bgp_pid>(\d+))\"$')
            m = p8.match(line)
            if m:
                if 'protocols' not in ret_dict:
                    ret_dict['protocols'] = {}
                if 'bgp' not in ret_dict['protocols']:
                    ret_dict['protocols']['bgp'] = {}

                # Set sub_dict
                bgp_dict = ret_dict['protocols']['bgp']
                bgp_dict['bgp_pid'] = int(m.groupdict()['bgp_pid'])
                continue

            # Non-stop routing is enabled
            p8 = re.compile(r'^Non-stop +routing +is'
                            ' +(?P<nsr>(enabled|disabled))$')
            m = p8.match(line)
            if m:
                if 'nsr' not in bgp_dict:
                    bgp_dict['nsr'] = {}
                if 'enabled' in m.groupdict()['nsr']:
                    bgp_dict['nsr']['enable'] = True
                else:
                    bgp_dict['nsr']['enable'] = False
                    continue

            # Graceful restart is not enabled
            p9 = re.compile(r'^Graceful restart is not +enabled$')
            m = p9.match(line)
            if m:
                if 'graceful_restart' not in bgp_dict:
                    bgp_dict['graceful_restart'] = {}
                bgp_dict['graceful_restart']['enable'] = False
                continue

            # Current BGP NSR state - Active Ready
            p10 = re.compile(r'^Current +BGP +NSR +state +\-'
                             ' +(?P<state>([a-zA-Z\s]+))$')
            m = p10.match(line)
            if m:
                if 'nsr' not in bgp_dict:
                    bgp_dict['nsr'] = {}
                bgp_dict['nsr']['current_state'] = \
                    str(m.groupdict()['state']).lower()
                continue

            # Address Family VPNv4 Unicast:
            # Address Family VPNv6 Unicast:
            p11 = re.compile(r'^Address +Family +(?P<af>([a-zA-Z0-9\s]+)):$')
            m = p11.match(line)
            if m:
                af = str(m.groupdict()['af']).lower()
                if 'address_family' not in bgp_dict:
                    bgp_dict['address_family'] = {}
                if af not in bgp_dict['address_family']:
                    bgp_dict['address_family'][af] = {}
                    continue

            # Distance: external 20 internal 200 local 200
            p12 = re.compile(r'^Distance: +external +(?P<ext>(\d+)) +internal'
                             ' +(?P<int>(\d+)) +local +(?P<local>(\d+))$')
            m = p12.match(line)
            if m:
                if 'distance' not in bgp_dict['address_family'][af]:
                    bgp_dict['address_family'][af]['distance'] = {}
                bgp_dict['address_family'][af]['distance']['external'] = \
                    int(m.groupdict()['ext'])
                bgp_dict['address_family'][af]['distance']['internal'] = \
                    int(m.groupdict()['int'])
                bgp_dict['address_family'][af]['distance']['local'] = \
                    int(m.groupdict()['local'])
                continue

            # Neighbor      State/Last update received  NSR-State  GR-Enabled
            # 4.4.4.4       08:05:59                    None       No
            # 4.4.4.4       08:05:59                    None       No
            p13 = re.compile(
                r'^(?P<nbr>([0-9\.]+)) +(?P<last_update>([0-9\:]+))'
                ' +(?P<nsr_state>(\S+)) +(?P<gr_en>(No|Yes))$')
            m = p13.match(line)
            if m:
                nbr = str(m.groupdict()['nbr'])
                if 'neighbors' not in bgp_dict['address_family'][af]:
                    bgp_dict['address_family'][af]['neighbors'] = {}
                if nbr not in bgp_dict['address_family'][af]['neighbors']:
                    bgp_dict['address_family'][af]['neighbors'][nbr] = {}
                bgp_dict['address_family'][af]['neighbors'][nbr]['last_update'] = \
                    str(m.groupdict()['last_update'])
                bgp_dict['address_family'][af]['neighbors'][nbr]['nsr_state'] = \
                    str(m.groupdict()['nsr_state'])
                bgp_dict['address_family'][af]['neighbors'][nbr]['gr_enable'] = \
                    str(m.groupdict()['gr_en'])
                continue

        return ret_dict
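
Every block of this parser follows the same match-then-nest pattern: compile a regex for one line shape and, on a match, create the nested dict keys before assigning. A minimal sketch for the "Distance:" line:

import re

line = 'Distance: external 20 internal 200 local 200'
p = re.compile(r'^Distance: +external +(?P<ext>\d+) +internal'
               r' +(?P<int>\d+) +local +(?P<local>\d+)$')
ret = {}
m = p.match(line)
if m:
    # create the nested key only when the line actually matched
    dist = ret.setdefault('distance', {})
    dist['external'] = int(m.group('ext'))
    dist['internal'] = int(m.group('int'))
    dist['local'] = int(m.group('local'))
print(ret)  # {'distance': {'external': 20, 'internal': 200, 'local': 200}}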
Example #50
0
## start sending packets
t = threading.Thread(target=udp_sender,args=(subnet,MAGIC_MESSAGE))
t.start()


try:
    while True:
        # read in a packet
        raw_buffer = sniffer.recvfrom(65535)[0]
        ## create ip header from the first 20 bytes of the buffer
        ip_header = IP(raw_buffer)

        ## print out the protocol that was detected and the hosts
        print("[*] Protocol {}:{} -> {} ".format(ip_header.protocol, ip_header.src_address, ip_header.dst_address))

        if ip_header.protocol == "ICMP":
            ## calculate where our ICMP packet starts
            offset = ip_header.ihl * 4
            buf = raw_buffer[offset:offset + sizeof(ICMP)]
            ## create our ICMP structure 
            icmp_header = ICMP(buf)
            print("[+] ICMP --> Type: {} Code: {} ".format(icmp_header.type, icmp_header.code)) 
            if icmp_header.code == 3 and icmp_header.type == 3:
                if IPAddress(ip_header.src) in IPNetwork(subnet):
                    if raw_buffer[len(raw_buffer)-len(MAGIC_MESSAGE):] == MAGIC_MESSAGE:
                        print("[**] HOST UP {}".format(ip_header.src))  

except KeyboardInterrupt:
    if os.name == 'nt':
        sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF)
Example #51
0
def get_connections(cidrs, vpc, outputfilter):
    """
    For a VPC, for each instance, find all of the other instances that can connect to it,
    including those in peered VPCs.
    Note that I do not consider subnet ACLs, routing tables, or other network concepts.
    """
    connections = {}

    # Get mapping of security group names to nodes that have that security group
    sg_to_instance_mapping = {}
    for instance in vpc.leaves:
        for sg in instance.security_groups:
            sg_to_instance_mapping.setdefault(sg, {})[instance] = True

    # For each security group, find all the instances that are allowed to connect to instances
    # within that group.
    for sg in get_sgs(vpc):
        # Get the CIDRs that are allowed to connect
        for cidr in pyjq.all(".IpPermissions[].IpRanges[].CidrIp", sg):
            if not is_external_cidr(cidr):
                # This is a private IP, ex. 10.0.0.0/16

                # See if we should skip this
                if not outputfilter.get("internal_edges", True):
                    continue

                # Find all instances in this VPC and peered VPCs that are in this CIDR
                for sourceVpc in itertools.chain(vpc.peers, (vpc, )):

                    # Ensure it is possible for instances in this VPC to be in the CIDR
                    if not (IPNetwork(sourceVpc.cidr) in IPNetwork(cidr)
                            or IPNetwork(cidr) in IPNetwork(sourceVpc.cidr)):
                        # The CIDR from the security group does not overlap with the CIDR of the VPC,
                        # so skip it
                        continue

                    # For each instance, check if one of its IPs is within the CIDR
                    for sourceInstance in sourceVpc.leaves:
                        for ip in sourceInstance.ips:
                            if IPAddress(ip) in IPNetwork(cidr):
                                # Instance found that can connect to instances in the SG
                                # So connect this instance (sourceInstance) to every instance
                                # in the SG.
                                for targetInstance in sg_to_instance_mapping.get(
                                        sg["GroupId"], {}):
                                    add_connection(connections, sourceInstance,
                                                   targetInstance, sg)

            else:
                # This is an external IP (ie. not in a private range).
                for instance in sg_to_instance_mapping.get(sg["GroupId"], {}):
                    # Ensure it has a public IP, as resources with only private IPs can't be reached
                    if instance.is_public:
                        cidrs[cidr].is_used = True
                        add_connection(connections, cidrs[cidr], instance, sg)
                    else:
                        if cidr == "0.0.0.0/0":
                            # Resource is not public, but allows anything to access it,
                            # so mark all the resources in the VPC as allowing access to it.
                            for source_instance in vpc.leaves:
                                add_connection(connections, source_instance,
                                               instance, sg)

        if outputfilter.get("internal_edges", True):
            # Connect allowed in Security Groups
            for ingress_sg in pyjq.all(
                    ".IpPermissions[].UserIdGroupPairs[].GroupId", sg):
                # We have an SG and a list of SG's it allows in
                for target in sg_to_instance_mapping.get(sg["GroupId"], {}):
                    # We have an instance and a list of SG's it allows in
                    for source in sg_to_instance_mapping.get(ingress_sg, {}):
                        if (not outputfilter.get("inter_rds_edges", True)
                                and (source.node_type == "rds"
                                     or source.node_type == "rds_rr")
                                and (target.node_type == "rds"
                                     or target.node_type == "rds_rr")):
                            continue
                        add_connection(connections, source, target, sg)

    # Connect everything to the Gateway endpoints
    for targetResource in vpc.leaves:
        if targetResource.has_unrestricted_ingress:
            for sourceVpc in itertools.chain(vpc.peers, (vpc, )):
                for sourceResource in sourceVpc.leaves:
                    add_connection(connections, sourceResource, targetResource,
                                   [])

    # Remove connections for source nodes that cannot initiate traffic (ex. VPC endpoints)
    for connection in list(connections):
        if not connection.source.can_egress:
            del connections[connection]

    return connections
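
The VPC/security-group comparison treats two CIDRs as relevant when either contains the other; a minimal netaddr sketch of that overlap test:

from netaddr import IPNetwork

vpc_cidr = IPNetwork('10.0.0.0/16')
sg_cidr = IPNetwork('10.0.1.0/24')
# "overlap" here means one network contains the other
print(sg_cidr in vpc_cidr or vpc_cidr in sg_cidr)  # True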
Example #52
0
    def clean_prevent_dupes_and_overlaps(self):
        """Make sure the new or updated range isn't going to cause a conflict.
        If it will, raise ValidationError.
        """

        # Check against the valid types before going further, since whether
        # or not the range overlaps anything that could cause an error heavily
        # depends on its type.
        valid_types = {choice[0] for choice in IPRANGE_TYPE_CHOICES}

        # If model is incomplete, save() will fail, so don't bother checking.
        if (self.subnet_id is None or self.start_ip is None
                or self.end_ip is None or self.type is None
                or self.type not in valid_types):
            return

        # The _state.adding flag is False if this instance exists in the DB.
        # See https://docs.djangoproject.com/en/1.9/ref/models/instances/.
        if not self._state.adding:
            try:
                orig = IPRange.objects.get(pk=self.id)
            except IPRange.DoesNotExist:
                # The code deletes itself and then tries to add it again to
                # check that it fits. On the second pass of this function
                # call, the IPRange does not exist.
                return
            else:
                if orig.type == self.type and (
                        orig.start_ip == self.start_ip) and (orig.end_ip
                                                             == self.end_ip):
                    # Range not materially modified, no range dupe check
                    # required.
                    return

                # Remove existing, check, then re-create.
                self_id = self.id
                # Delete will be rolled back if imminent range checks raise.
                self.delete()
                # Simulate update by setting the ID back to what it was.
                self.id = self_id

        # Reserved ranges can overlap allocated IPs but not other ranges.
        # Dynamic ranges cannot overlap anything (no ranges or IPs).
        if self.type == IPRANGE_TYPE.RESERVED:
            unused = self.subnet.get_ipranges_available_for_reserved_range()
        else:
            unused = self.subnet.get_ipranges_available_for_dynamic_range()

        if len(unused) == 0:
            self._raise_validation_error(
                "There is no room for any %s ranges on this subnet." %
                (self.type))

        message = "Requested %s range conflicts with an existing " % self.type
        if self.type == IPRANGE_TYPE.RESERVED:
            message += "range."
        else:
            message += "IP address or range."

        # Find unused range for start_ip
        for range in unused:
            if IPAddress(self.start_ip) in range:
                if IPAddress(self.end_ip) in range:
                    # Success, start and end IP are in an unused range.
                    return
                else:
                    self._raise_validation_error(message)
        self._raise_validation_error(message)
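
The final check asks whether both ends of the requested range land inside a single unused range; a small sketch with hypothetical free ranges:

from netaddr import IPAddress, IPRange

unused = [IPRange('10.0.0.10', '10.0.0.50'), IPRange('10.0.0.100', '10.0.0.200')]
start_ip, end_ip = IPAddress('10.0.0.20'), IPAddress('10.0.0.40')
# both endpoints must fall in the same free range for the new range to fit
print(any(start_ip in r and end_ip in r for r in unused))  # True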
Example #53
0
def update_links(node,
                 interface,
                 links,
                 force_vlan=False,
                 use_interface_vlan=True):
    """Update the links on `interface`."""
    interface.ip_addresses.filter(
        alloc_type=IPADDRESS_TYPE.DISCOVERED).delete()
    current_ip_addresses = set(interface.ip_addresses.all())
    updated_ip_addresses = set()
    if use_interface_vlan and interface.vlan is not None:
        vlan = interface.vlan
    elif links:
        fabric = Fabric.objects.create()
        vlan = fabric.get_default_vlan()
        interface.vlan = vlan
        interface.save()
    for link in links:
        if link.get("mode") == "dhcp":
            dhcp_address = get_alloc_type_from_ip_addresses(
                node, IPADDRESS_TYPE.DHCP, current_ip_addresses)
            if dhcp_address is None:
                dhcp_address = StaticIPAddress.objects.create(
                    alloc_type=IPADDRESS_TYPE.DHCP, ip=None, subnet=None)
                dhcp_address.save()
                interface.ip_addresses.add(dhcp_address)
            else:
                current_ip_addresses.remove(dhcp_address)
            if "address" in link:
                # DHCP IP address was discovered. Add it as a discovered
                # IP address.
                ip_network = IPNetwork(f"{link['address']}/{link['netmask']}")
                ip_addr = str(ip_network.ip)

                # Get or create the subnet for this link. If created, it
                # will be added to the VLAN on the interface.
                subnet, _ = Subnet.objects.get_or_create(
                    cidr=str(ip_network.cidr),
                    defaults={
                        "name": str(ip_network.cidr),
                        "vlan": vlan
                    },
                )

                # Make sure that the subnet is on the same VLAN as the
                # interface.
                if force_vlan and subnet.vlan_id != interface.vlan_id:
                    maaslog.error(
                        "Unable to update IP address '%s' assigned to "
                        "interface '%s' on controller '%s'. "
                        "Subnet '%s' for IP address is not on "
                        "VLAN '%s.%d'." % (
                            ip_addr,
                            interface.name,
                            node.hostname,
                            subnet.name,
                            subnet.vlan.fabric.name,
                            subnet.vlan.vid,
                        ))
                    continue

                # Create the DISCOVERED IP address.
                ip_address, _ = StaticIPAddress.objects.update_or_create(
                    ip=ip_addr,
                    defaults={
                        "alloc_type": IPADDRESS_TYPE.DISCOVERED,
                        "subnet": subnet,
                    },
                )
                interface.ip_addresses.add(ip_address)
            updated_ip_addresses.add(dhcp_address)
        else:
            ip_network = IPNetwork(f"{link['address']}/{link['netmask']}")
            ip_addr = str(ip_network.ip)

            # Get or create the subnet for this link. If created, it will
            # be added to the VLAN on the interface.
            subnet, _ = Subnet.objects.get_or_create(
                cidr=str(ip_network.cidr),
                defaults={
                    "name": str(ip_network.cidr),
                    "vlan": vlan
                },
            )

            # Make sure that the subnet is on the same VLAN as the
            # interface.
            if force_vlan and subnet.vlan_id != interface.vlan_id:
                maaslog.error("Unable to update IP address '%s' assigned to "
                              "interface '%s' on controller '%s'. Subnet '%s' "
                              "for IP address is not on VLAN '%s.%d'." % (
                                  ip_addr,
                                  interface.name,
                                  node.hostname,
                                  subnet.name,
                                  subnet.vlan.fabric.name,
                                  subnet.vlan.vid,
                              ))
                continue

            # Update the gateway on the subnet if one is not set.
            if (subnet.gateway_ip is None and link.get("gateway")
                    and IPAddress(link["gateway"]) in subnet.get_ipnetwork()):
                subnet.gateway_ip = link["gateway"]
                subnet.save()

            address_type = (IPADDRESS_TYPE.DISCOVERED if
                            node.is_commissioning() else IPADDRESS_TYPE.STICKY)
            # IP address is not assigned to this interface. Get or
            # create that IP address.
            ip_address, created = StaticIPAddress.objects.update_or_create(
                ip=ip_addr,
                defaults={
                    "alloc_type": address_type,
                    "subnet": subnet,
                },
            )
            if not created:
                current_ip_addresses.discard(ip_address)

            # Update the properties and make sure all interfaces
            # assigned to the address belong to this node.
            for attached_nic in ip_address.interface_set.all():
                if attached_nic.node != node:
                    attached_nic.ip_addresses.remove(ip_address)

            # Add this IP address to the interface.
            interface.ip_addresses.add(ip_address)
            updated_ip_addresses.add(ip_address)

    # Remove all the current IP address that no longer apply to this
    # interface.
    for ip_address in current_ip_addresses:
        interface.unlink_ip_address(ip_address)

    return updated_ip_addresses
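
Each link's address/netmask pair is turned into a netaddr IPNetwork, which exposes both the host address and the subnet CIDR; a quick illustration:

from netaddr import IPNetwork

link = {'address': '192.0.2.10', 'netmask': '255.255.255.0'}
ip_network = IPNetwork(f"{link['address']}/{link['netmask']}")
print(str(ip_network.ip))    # 192.0.2.10, the interface address itself
print(str(ip_network.cidr))  # 192.0.2.0/24, the subnet used to look up or create Subnet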
Example #54
0
    def clone(self):
        """
        Command Section: clone
        Clone a VM from a template
        """
        self.config['hostname'] = self.config['hostname'].lower()
        self.config['mem'] = int(self.config['mem'] * 1024)  # convert GB to MB

        print "Cloning %s to new host %s with %sMB RAM..." % (
            self.config['template'], self.config['hostname'],
            'same amount of '
            if self.config['mem'] == 0 else self.config['mem'])

        # initialize a list to hold our network settings
        ip_settings = list()

        # Get network settings for each IP
        net_data = self.config['ips']

        matched_network = None
        for network in self.config['networks']:
            if (net_data == network) and (
                    self.config['networks'][network]['type']
                    == 'dhcp'):  # we got a match for a network with dhcp
                matched_network = network
                break
        # if no matched network yet, try to match static settings
        if not matched_network:
            ip = IPAddress(net_data)
            for network in self.config['networks']:
                if self.config['networks'][network]['type'] != 'static':
                    continue
                if ip not in IPNetwork(
                        self.config['networks'][network]['network']):
                    continue
                self.config['networks'][network]['ip'] = ip
                ipnet = IPNetwork(self.config['networks'][network]['network'])
                self.config['networks'][network]['subnet_mask'] = str(
                    ipnet.netmask)
                ip_settings.append(self.config['networks'][network])
                matched_network = network
                break

        if not matched_network:
            print "I don't know what network %s is in.  You can supply " \
                  "settings for this network in config.yml." % net_data
            sys.exit(1)

        network_label = self.config['networks'][matched_network][
            'network_label']
        self.get_obj([vim.Network], network_label)  # validate VLAN
        datacenter = self.get_obj(
            [vim.Datacenter],
            self.config['networks'][matched_network]['datacenter'])
        destfolder = datacenter.vmFolder
        cluster = self.get_obj(
            [vim.ClusterComputeResource],
            self.config['networks'][matched_network]['cluster'])

        relospec = vim.vm.RelocateSpec()

        datastore = self.get_obj(
            [vim.Datastore],
            self.config['networks'][matched_network]['datastore'])
        relospec.datastore = datastore

        resource_pool_str = self.config['resource_pool']
        if resource_pool_str == 'Resources' and (
                'resource_pool' in self.config['networks'][matched_network]):
            resource_pool_str = self.config['networks'][matched_network][
                'resource_pool']
        resource_pool = self.get_resource_pool(cluster, resource_pool_str)
        if resource_pool:
            relospec.pool = resource_pool

        host_system = self.config['host']
        if host_system != "":
            host_system = self.get_obj([vim.HostSystem], self.config['host'])
        if host_system:
            relospec.host = host_system

        template_vm = self.get_vm_failfast(self.config['template'], False,
                                           'Template VM')

        devices = []
        # delete existing NIC devices from template
        try:
            for device in template_vm.config.hardware.device:

                if hasattr(device, 'addressType'):
                    # this is a VirtualEthernetCard, so we'll delete it
                    nic = vim.vm.device.VirtualDeviceSpec()
                    nic.operation = \
                        vim.vm.device.VirtualDeviceSpec.Operation.remove
                    nic.device = device
                    devices.append(nic)
        except:
            # not the most graceful handling, but unable to reproduce
            # user's issues in #57 at this time.
            pass

        # add a new NIC device
        nic = vim.vm.device.VirtualDeviceSpec()
        nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        nic.device = vim.vm.device.VirtualVmxnet3()
        nic.device.wakeOnLanEnabled = True
        nic.device.addressType = 'assigned'
        nic.device.key = 4000  # 4000 seems to be the value to use for a vmxnet3 device
        nic.device.deviceInfo = vim.Description()
        nic.device.deviceInfo.label = 'Network adapter %s' % (1)
        nic.device.deviceInfo.summary = network_label
        nic.device.backing = (
            vim.vm.device.VirtualEthernetCard.NetworkBackingInfo())
        nic.device.backing.network = (self.get_obj([vim.Network],
                                                   network_label))
        nic.device.backing.deviceName = network_label
        nic.device.backing.useAutoDetect = True
        nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
        nic.device.connectable.startConnected = True
        nic.device.connectable.allowGuestControl = True
        devices.append(nic)

        adaptermaps = []
        if self.config['networks'][matched_network]['type'] == 'dhcp':
            guest_map = vim.vm.customization.AdapterMapping()
            guest_map.adapter = vim.vm.customization.IPSettings(
                ip=vim.vm.customization.DhcpIpGenerator(),
                dnsDomain='domain.local')
            adaptermaps.append(guest_map)
        else:
            # guest NIC settings, i.e. 'adapter map'
            guest_map = vim.vm.customization.AdapterMapping()
            guest_map.adapter = vim.vm.customization.IPSettings()
            guest_map.adapter.ip = vim.vm.customization.FixedIp()
            guest_map.adapter.ip.ipAddress = str(
                self.config['networks'][matched_network]['ip'])
            guest_map.adapter.subnetMask = str(
                self.config['networks'][matched_network]['subnet_mask'])

            # these may not be set for certain IPs
            try:
                guest_map.adapter.gateway = self.config['networks'][
                    matched_network]['gateway']
            except:
                pass

            try:
                guest_map.adapter.dnsDomain = self.config['domain']
            except:
                pass

            adaptermaps.append(guest_map)

        if len(adaptermaps) == 0:
            print "I cannot apply network settings to new VM"
            sys.exit(1)

        # VM config spec
        vmconf = vim.vm.ConfigSpec()
        if self.config['cpus'] == 0:
            vmconf.numCPUs = template_vm.summary.config.numCpu
        else:
            vmconf.numCPUs = self.config['cpus']
        if self.config['mem'] == 0:
            vmconf.memoryMB = template_vm.summary.config.memorySizeMB
        else:
            vmconf.memoryMB = self.config['mem']
        vmconf.cpuHotAddEnabled = True
        vmconf.memoryHotAddEnabled = True
        vmconf.deviceChange = devices

        # DNS settings
        globalip = vim.vm.customization.GlobalIPSettings()
        globalip.dnsServerList = self.config['dns_servers']
        globalip.dnsSuffixList = self.config['domain']

        # Hostname settings
        ident = vim.vm.customization.LinuxPrep()
        ident.domain = self.config['domain']
        ident.hostName = vim.vm.customization.FixedName()
        ident.hostName.name = self.config['hostname']

        customspec = vim.vm.customization.Specification()
        customspec.nicSettingMap = adaptermaps
        customspec.globalIPSettings = globalip
        customspec.identity = ident

        # Clone spec
        clonespec = vim.vm.CloneSpec()
        clonespec.location = relospec
        clonespec.config = vmconf
        clonespec.customization = customspec
        clonespec.powerOn = not self.config['suppress_power_on']
        clonespec.template = False

        self.addDisks(template_vm, clonespec)

        if self.debug:
            self.print_debug("CloneSpec", clonespec)

        # fire the clone task
        tasks = [
            template_vm.Clone(folder=destfolder,
                              name=self.config['hostname'],
                              spec=clonespec)
        ]
        result = self.WaitForTasks(tasks)

        if self.config['post_clone_cmd']:
            try:
                # helper env variables
                os.environ['EZMOMI_CLONE_HOSTNAME'] = self.config['hostname']
                print "Running --post-clone-cmd %s" % \
                      self.config['post_clone_cmd']
                os.system(self.config['post_clone_cmd'])

            except Exception as e:
                print "Error running post-clone command. Exception: %s" % e
                pass

        # send notification email
        if self.config['mail']:
            self.send_email()
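
The clone routine above uses netaddr only to turn a configured CIDR into the dotted subnet mask it hands to vim.vm.customization.IPSettings (and, in the network-matching loop that precedes this excerpt, presumably to test which configured network contains the requested address). A minimal standalone sketch of that piece, with a made-up config entry:

from netaddr import IPAddress, IPNetwork

# Hypothetical entry; the keys mirror the config.yml structure used above.
net_entry = {'network': '10.20.30.0/24', 'ip': '10.20.30.15'}

ipnet = IPNetwork(net_entry['network'])
net_entry['subnet_mask'] = str(ipnet.netmask)   # '255.255.255.0'
assert IPAddress(net_entry['ip']) in ipnet      # containment check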
Example #55
    def scan(self):
        nics = []
        for interface in os.listdir('/sys/class/net/'):
            # ignore entries that are not symlinks (e.g. bonding_masters)
            if not os.path.islink('/sys/class/net/{}'.format(interface)):
                continue

            if config.network.ignore_interfaces and \
               re.match(config.network.ignore_interfaces, interface):
                logging.debug(
                    'Ignore interface {interface}'.format(interface=interface))
                continue

            ip_addr = netifaces.ifaddresses(interface).get(
                netifaces.AF_INET, [])
            ip6_addr = netifaces.ifaddresses(interface).get(
                netifaces.AF_INET6, [])

            # netifaces returns an IPv6 netmask in a format netaddr does not
            # understand. This strips the netmask down to the format netaddr
            # expects and removes the '%interface' scope suffix from the
            # address.
            # i.e., this:
            #   {
            #      'addr': 'fe80::ec4:7aff:fe59:ec4a%eno1.50',
            #      'netmask': 'ffff:ffff:ffff:ffff::/64'
            #   }
            #
            # becomes:
            #   {
            #      'addr': 'fe80::ec4:7aff:fe59:ec4a',
            #      'netmask': 'ffff:ffff:ffff:ffff::'
            #   }
            #
            for addr in ip6_addr:
                addr["addr"] = addr["addr"].replace('%{}'.format(interface),
                                                    '')
                addr["netmask"] = addr["netmask"].split('/')[0]
                ip_addr.append(addr)

            if config.network.ignore_ips and ip_addr:
                for i, ip in enumerate(ip_addr):
                    if re.match(config.network.ignore_ips, ip['addr']):
                        ip_addr.pop(i)

            mac = open('/sys/class/net/{}/address'.format(interface),
                       'r').read().strip()
            vlan = None
            if len(interface.split('.')) > 1:
                vlan = int(interface.split('.')[1])
            bonding = False
            bonding_slaves = []
            if os.path.isdir('/sys/class/net/{}/bonding'.format(interface)):
                bonding = True
                bonding_slaves = open(
                    '/sys/class/net/{}/bonding/slaves'.format(
                        interface)).read().split()

            # Tun and TAP support
            virtual = os.path.isfile(
                '/sys/class/net/{}/tun_flags'.format(interface))

            nic = {
                'name':
                interface,
                'mac':
                mac if mac != '00:00:00:00:00:00' else None,
                'ip': [
                    '{}/{}'.format(x['addr'],
                                   IPAddress(x['netmask']).netmask_bits())
                    for x in ip_addr
                ] if ip_addr else None,  # FIXME: handle IPv6 addresses
                'ethtool':
                Ethtool(interface).parse(),
                'virtual':
                virtual,
                'vlan':
                vlan,
                'bonding':
                bonding,
                'bonding_slaves':
                bonding_slaves,
            }
            nics.append(nic)
        return nics
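
scan() relies on netaddr only for netmask_bits(), which converts the textual netmask reported by netifaces into the prefix length used in each 'ip' entry. A minimal sketch of that conversion, including the IPv6 normalization described in the comment above; the interface name and addresses are illustrative:

from netaddr import IPAddress

assert IPAddress('255.255.255.0').netmask_bits() == 24

# an IPv6 entry roughly as netifaces reports it
addr = {'addr': 'fe80::ec4:7aff:fe59:ec4a%eno1',
        'netmask': 'ffff:ffff:ffff:ffff::/64'}
addr['addr'] = addr['addr'].replace('%eno1', '')      # drop the scope suffix
addr['netmask'] = addr['netmask'].split('/')[0]       # drop the /64 part
assert IPAddress(addr['netmask']).netmask_bits() == 64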
Example #56
 def exabgp_msg(bgp_message):
     redis.set(
         "exabgp_seen_bgp_update",
         "1",
         ex=int(
             os.getenv(
                 "MON_TIMEOUT_LAST_BGP_UPDATE",
                 DEFAULT_MON_TIMEOUT_LAST_BGP_UPDATE,
             )),
     )
     msg = {
         "type": bgp_message["type"],
         "communities": bgp_message.get("communities", []),
         "timestamp": float(bgp_message["timestamp"]),
         "path": bgp_message.get("path", []),
         "service": "exabgp|{}".format(self.host),
         "prefix": bgp_message["prefix"],
         "peer_asn": int(bgp_message["peer_asn"]),
     }
     for prefix in self.prefixes:
         try:
             base_ip, mask_length = bgp_message["prefix"].split("/")
             our_prefix = IPNetwork(prefix)
             if (IPAddress(base_ip) in our_prefix and
                     int(mask_length) >= our_prefix.prefixlen):
                 try:
                     if validator.validate(msg):
                         msgs = normalize_msg_path(msg)
                         for msg in msgs:
                             key_generator(msg)
                             log.debug(msg)
                             if self.autoconf:
                                 # thread-safe access to update dict
                                 lock.acquire()
                                 try:
                                     if self.learn_neighbors:
                                         msg["learn_neighbors"] = True
                                     self.autoconf_updates[
                                         msg["key"]] = msg
                                     # mark the autoconf BGP updates for configuration
                                     # processing in redis
                                     redis_pipeline = redis.pipeline()
                                     redis_pipeline.sadd(
                                         "autoconf-update-keys-to-process",
                                         msg["key"],
                                     )
                                     redis_pipeline.execute()
                                 except Exception:
                                     log.exception("exception")
                                 finally:
                                     lock.release()
                             else:
                                 with Producer(connection) as producer:
                                     producer.publish(
                                         msg,
                                         exchange=self.update_exchange,
                                         routing_key="update",
                                         serializer="ujson",
                                     )
                     else:
                         log.warning(
                             "Invalid format message: {}".format(msg))
                 except BaseException:
                     log.exception(
                         "Error when normalizing BGP message: {}"
                         .format(msg))
                 break
         except Exception:
             log.exception("exception")
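
The gating condition above boils down to a containment-and-specificity test with netaddr: the announced prefix's base address must fall inside a configured prefix, and its mask must be at least as long. A minimal standalone sketch with made-up prefixes:

from netaddr import IPAddress, IPNetwork

our_prefix = IPNetwork('10.0.0.0/8')               # configured prefix
base_ip, mask_length = '10.1.2.0/24'.split('/')    # announced prefix

assert IPAddress(base_ip) in our_prefix
assert int(mask_length) >= our_prefix.prefixlen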
Example #57
 def test_form_ipv4_valid(self):
     form = self.form_class({'field': '10.0.0.1'})
     self.assertTrue(form.is_valid())
     self.assertEqual(form.cleaned_data['field'], IPAddress('10.0.0.1'))
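
The test expects the form to hand back a netaddr IPAddress as its cleaned value. A minimal sketch of a form that would satisfy it, assuming Django and netaddr are available; this is not the project's actual form class, and the field name simply mirrors the test:

from django import forms
from netaddr import IPAddress, AddrFormatError

class IPAddressForm(forms.Form):
    field = forms.CharField()

    def clean_field(self):
        # clean_<name> replaces cleaned_data['<name>'] with its return value
        try:
            return IPAddress(self.cleaned_data['field'])
        except AddrFormatError:
            raise forms.ValidationError('Enter a valid IP address.')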
Example #58
 def ip_belongs_to(cls, ipnet, ipaddr):
     return IPAddress(ipaddr) in ipnet
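
For reference, the containment operator the classmethod wraps (addresses are illustrative):

from netaddr import IPAddress, IPNetwork

assert IPAddress('192.0.2.5') in IPNetwork('192.0.2.0/24')
assert IPAddress('198.51.100.1') not in IPNetwork('192.0.2.0/24')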
Example #59
File: snat.py Project: John-Lin/nat
 def _is_public(self, ip):
     ip = IPAddress(ip)
     return ip.is_unicast() and not ip.is_private()
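
A quick check of the same classification, assuming a netaddr version that provides is_private() as the snippet does; addresses are illustrative:

from netaddr import IPAddress

assert IPAddress('8.8.8.8').is_unicast() and not IPAddress('8.8.8.8').is_private()
assert IPAddress('192.168.1.1').is_private()      # RFC 1918, so not public
assert not IPAddress('224.0.0.1').is_unicast()    # multicast, so not public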
Example #60
class TestOMEDeviceGroup(FakeAnsibleModule):
    module = ome_device_group

    def test_ome_device_group_get_group_id_case01(
            self, ome_connection_mock_for_device_group, ome_response_mock):
        f_module = self.get_module_mock(
            params={
                "name": "Storage Services",
                "device_ids": [25011],
                "device_service_tags": []
            })
        ome_response_mock.json_data = {"value": []}
        with pytest.raises(Exception) as exc:
            self.module.get_group_id(ome_connection_mock_for_device_group,
                                     f_module)
        assert exc.value.args[0] == "Unable to complete the operation because the entered " \
                                    "target group name 'Storage Services' is invalid."
        ome_response_mock.json_data = {
            "value": [{
                "Id": 25011,
                "CreatedBy": "user",
                "TypeId": 3000,
                "MembershipTypeId": 12
            }]
        }
        resp = self.module.get_group_id(ome_connection_mock_for_device_group,
                                        f_module)
        assert resp == 25011

    def test_ome_device_group_get_group_id_case02(
            self, ome_connection_mock_for_device_group, ome_response_mock):
        f_module = self.get_module_mock(params={
            "group_id": 1234,
            "device_ids": [25011],
            "device_service_tags": []
        })
        ome_connection_mock_for_device_group.invoke_request.side_effect = HTTPError(
            'http://testhost.com', 400, 'http error message',
            {"accept-type": "application/json"},
            StringIO(to_text(json.dumps({"info": "error_details"}))))
        with pytest.raises(Exception) as exc1:
            self.module.get_group_id(ome_connection_mock_for_device_group,
                                     f_module)
        assert exc1.value.args[0] == "Unable to complete the operation because the entered " \
                                     "target group Id '1234' is invalid."

    def test_ome_device_group_get_group_id_case03(
            self, ome_connection_mock_for_device_group, ome_response_mock):
        f_module = self.get_module_mock(params={
            "group_id": 1234,
            "device_ids": [25011],
            "device_service_tags": []
        })
        ome_response_mock.json_data = {
            "Id": 1234,
            "CreatedBy": "user",
            "TypeId": 3000,
            "MembershipTypeId": 12
        }
        resp = self.module.get_group_id(ome_connection_mock_for_device_group,
                                        f_module)
        assert resp == 1234

    def test_ome_device_group_get_device_id(
            self, ome_connection_mock_for_device_group):
        report_list = [{
            "Id": 25011,
            "DeviceServiceTag": "SEFRG2"
        }, {
            "Id": 25012,
            "DeviceServiceTag": "SEFRG3"
        }]
        ome_connection_mock_for_device_group.get_all_report_details.return_value = {
            "report_list": report_list
        }
        f_module = self.get_module_mock(params={
            "name": "Storage Services",
            "device_ids": [25011, 25012]
        })
        device_list, key = self.module.get_device_id(
            ome_connection_mock_for_device_group, f_module)
        assert device_list == [25011, 25012]
        assert key == "Id"
        f_module = self.get_module_mock(
            params={
                "name": "Storage Services",
                "device_service_tags": ["SEFRG2", "SEFRG3"]
            })
        device_list, key = self.module.get_device_id(
            ome_connection_mock_for_device_group, f_module)
        assert device_list == [25011, 25012]
        assert key == "DeviceServiceTag"

        f_module = self.get_module_mock(params={
            "name": "Storage Services",
            "device_ids": [25011, 25000]
        })
        with pytest.raises(Exception) as exc:
            self.module.get_device_id(ome_connection_mock_for_device_group,
                                      f_module)
        assert exc.value.args[0] == "Unable to complete the operation because the entered target " \
                                    "device id(s) '25000' are invalid."

    def test_ome_device_group_add_member_to_group(
            self, ome_connection_mock_for_device_group, ome_response_mock):
        report_list = [{"Id": 25011, "DeviceServiceTag": "SEFRG2"}]
        ome_connection_mock_for_device_group.get_all_report_details.return_value = {
            "report_list": report_list
        }
        f_module = self.get_module_mock(params={
            "name": "Storage Services",
            "device_ids": [25011]
        })
        ome_response_mock.status_code = 204
        ome_response_mock.success = True
        with pytest.raises(Exception) as exc:
            self.module.add_member_to_group(
                f_module, ome_connection_mock_for_device_group, 1, [25011],
                "Id")
        assert exc.value.args[0] == "No changes found to be applied."

        f_module.check_mode = True
        with pytest.raises(Exception) as exc:
            self.module.add_member_to_group(
                f_module, ome_connection_mock_for_device_group, 1, [25011],
                "Id")
        assert exc.value.args[0] == "No changes found to be applied."

        f_module.check_mode = False
        report_list = [{
            "Id": 25013,
            "DeviceServiceTag": "SEFRG4"
        }, {
            "Id": 25014,
            "DeviceServiceTag": "SEFRG5"
        }]
        ome_connection_mock_for_device_group.get_all_report_details.return_value = {
            "report_list": report_list
        }
        resp, [] = self.module.add_member_to_group(
            f_module, ome_connection_mock_for_device_group, 1, [25011, 25012],
            "Id")
        assert resp.status_code == 204

        f_module.check_mode = True
        with pytest.raises(Exception) as exc:
            self.module.add_member_to_group(
                f_module, ome_connection_mock_for_device_group, 1,
                [25011, 25012], "Id")
        assert exc.value.args[0] == "Changes found to be applied."

    def test_ome_device_group_main_exception(
            self, ome_connection_mock_for_device_group, mocker,
            ome_response_mock, ome_default_args):
        ome_default_args.update({
            "name": "Storage Services",
            "device_ids": [25011, 25012]
        })
        ome_response_mock.status_code = 204
        ome_response_mock.success = True
        mocker.patch(MODULE_PATH + 'get_group_id', return_value=1)
        mocker.patch(MODULE_PATH + 'get_device_id',
                     return_value=[25011, 25012])
        mocker.patch(MODULE_PATH + 'add_member_to_group',
                     return_value=(ome_response_mock, []))
        result = self._run_module(ome_default_args)
        assert result[
            'msg'] == "Successfully added member(s) to the device group."

    def test_ome_device_group_argument_exception_case1(self, ome_default_args):
        ome_default_args.update({
            "name": "Storage Services",
            "device_ids": [25011, 25012],
            "group_id": 1234
        })
        result = self._run_module_with_fail_json(ome_default_args)
        assert result[
            "msg"] == "parameters are mutually exclusive: name|group_id"

    def test_ome_device_group_argument_exception_case2(self, ome_default_args):
        ome_default_args.update({
            "device_ids": [25011, 25012],
            "group_id": 1234,
            "device_service_tags": [Constants.service_tag1]
        })
        result = self._run_module_with_fail_json(ome_default_args)
        assert result[
            "msg"] == "parameters are mutually exclusive: device_ids|device_service_tags|ip_addresses"

    def test_ome_device_group_argument_exception_case3(self, ome_default_args):
        ome_default_args.update({"device_ids": [25011, 25012]})
        result = self._run_module_with_fail_json(ome_default_args)
        assert result[
            "msg"] == "one of the following is required: name, group_id"

    def test_ome_device_group_argument_exception_case4(self, ome_default_args):
        ome_default_args.update({"group_id": 1234})
        result = self._run_module_with_fail_json(ome_default_args)
        assert result[
            "msg"] == "one of the following is required: device_ids, device_service_tags, ip_addresses"

    def test_ome_device_group_argument_exception_case5(self, ome_default_args):
        ome_default_args.update({
            "device_ids": None,
            "group_id": 1234,
            "device_service_tags": None
        })
        result = self._run_module_with_fail_json(ome_default_args)
        assert result[
            "msg"] == "parameters are mutually exclusive: device_ids|device_service_tags|ip_addresses"

    @pytest.mark.parametrize("exc_type", [
        IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError,
        URLError
    ])
    def test_ome_device_group_argument_main_exception_failure_case(
            self, exc_type, mocker, ome_default_args,
            ome_connection_mock_for_device_group, ome_response_mock):
        ome_default_args.update({
            "name": "Storage Services",
            "device_ids": [25011, 25012]
        })
        ome_response_mock.status_code = 400
        ome_response_mock.success = False
        json_str = to_text(json.dumps({"info": "error_details"}))
        if exc_type == URLError:
            mocker.patch(MODULE_PATH + 'get_group_id',
                         side_effect=exc_type("url open error"))
            result = self._run_module(ome_default_args)
            assert result["unreachable"] is True
        elif exc_type not in [HTTPError, SSLValidationError]:
            mocker.patch(MODULE_PATH + 'get_group_id',
                         side_effect=exc_type("exception message"))
            result = self._run_module_with_fail_json(ome_default_args)
            assert result['failed'] is True
        else:
            mocker.patch(MODULE_PATH + 'get_group_id',
                         side_effect=exc_type(
                             'http://testhost.com', 400, 'http error message',
                             {"accept-type": "application/json"},
                             StringIO(json_str)))
            result = self._run_module_with_fail_json(ome_default_args)
            assert result['failed'] is True
        assert 'msg' in result

    @pytest.mark.parametrize("inp", [{
        "TypeId": 3000,
        "MembershipTypeId": 24
    }, {
        "TypeId": 1000,
        "MembershipTypeId": 24
    }, {
        "TypeId": 2000,
        "MembershipTypeId": 12
    }])
    def test_validate_group_case01(self, inp, ome_response_mock):
        group_resp = {
            "Id": 25011,
            "CreatedBy": "user",
            "TypeId": inp["TypeId"],
            "MembershipTypeId": inp["MembershipTypeId"]
        }
        f_module = self.get_module_mock(params={
            "name": "group1",
            "device_ids": [25011]
        })
        with pytest.raises(Exception) as exc:
            self.module.validate_group(group_resp, f_module, "name", "group1")
        assert exc.value.args[0] == ADD_STATIC_GROUP_MESSAGE

    @pytest.mark.parametrize(
        "inp,out",
        [(['192.168.2.0'], [IPAddress('192.168.2.0')]),
         (['fe80::ffff:ffff:ffff:ffff'
           ], [IPAddress('fe80::ffff:ffff:ffff:ffff')]),
         (['192.168.2.0/24'], [IPNetwork('192.168.2.0/24')]),
         (['fe80::ffff:ffff:ffff:1111-fe80::ffff:ffff:ffff:ffff'],
          [IPRange('fe80::ffff:ffff:ffff:1111', 'fe80::ffff:ffff:ffff:ffff')]),
         ([
             '192.168.2.0', 'fe80::ffff:ffff:ffff:ffff', '192.168.2.0/24',
             'fe80::ffff:ffff:ffff:1111-fe80::ffff:ffff:ffff:ffff',
             '2002:c000:02e6::1/48'
         ], [
             IPAddress('192.168.2.0'),
             IPAddress('fe80::ffff:ffff:ffff:ffff'),
             IPNetwork('192.168.2.0/24'),
             IPRange('fe80::ffff:ffff:ffff:1111', 'fe80::ffff:ffff:ffff:ffff'),
             IPNetwork('2002:c000:02e6::1/48')
         ])])
    def test_get_all_ips_success_case(self, inp, out):
        f_module = self.get_module_mock(params={
            "name": "group1",
            "ip_addresses": inp
        })
        res = self.module.get_all_ips(inp, f_module)
        assert res == out
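
    # A hedged sketch (not the module's real get_all_ips) of one way netaddr
    # can parse the three accepted shapes exercised above: a hyphenated range,
    # a CIDR network, and a bare address. The helper name is illustrative.
    @staticmethod
    def _parse_ip_entry_sketch(entry):
        if '-' in entry:
            start, end = entry.split('-')
            return IPRange(start, end)
        if '/' in entry:
            return IPNetwork(entry)
        return IPAddress(entry)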

    @pytest.mark.parametrize(
        "inp", [["abc"], [""], ["266.128"], ["100:1bcd:xyz"],
                ["192.168.0.0--192.168.0.1"], ["-192.168.0.0-192.168.0.1"],
                ["-192.168.0.0192.168.0.1"], ["192.168.0.0-192.168.0.1-"],
                ["192.168.0.0192.168.0.1-"], ["192.168.0.1//24"],
                ["\192.168.0.1//24"], ["192.168.0.1/\24"], ["/192.168.0.1/24"],
                ["1.12.1.36/255.255.255.88"]],
        ids=[
            "abc", "", "266.128", "100:1bcd:xyz", "192.168.0.0--192.168.0.1",
            "-192.168.0.0-192.168.0.1", "-192.168.0.0192.168.0.1",
            "192.168.0.0-192.168.0.1-", "192.168.0.0192.168.0.1-",
            "192.168.0.1//24", "\192.168.0.1//24", "192.168.0.1/\24",
            "/192.168.0.1/24", "1.12.1.36/255.255.255.88"
        ])
    def test_get_all_ips_failure_case(self, inp):
        f_module = self.get_module_mock(params={
            "name": "group1",
            "ip_addresses": inp
        })
        with pytest.raises(Exception,
                           match=INVALID_IP_FORMAT.format(inp[0])) as err:
            self.module.get_all_ips(inp, f_module)

    def test_get_device_id_from_ip_success_case(self):
        device_list = [{
            "Id":
            1111,
            "Identifier":
            "device1",
            "DeviceServiceTag":
            "device1",
            "DeviceManagement": [{
                "NetworkAddress": "192.168.2.255",
            }],
        }, {
            "Id":
            2222,
            "Identifier":
            "device2",
            "DeviceServiceTag":
            "device2",
            "DeviceManagement": [{
                "NetworkAddress": "192.168.4.10",
            }],
        }, {
            "Id":
            3333,
            "Identifier":
            "device3",
            "DeviceServiceTag":
            "device3",
            "DeviceManagement": [{
                "NetworkAddress": "192.168.2.10",
            }],
        }, {
            "Id":
            4444,
            "Identifier":
            "device4",
            "DeviceServiceTag":
            "device4",
            "DeviceManagement": [{
                "NetworkAddress": "192.168.3.10",
            }],
        }, {
            "Id":
            5555,
            "Identifier":
            "device5",
            "DeviceServiceTag":
            "device5",
            "DeviceManagement": [{
                "NetworkAddress": "192.168.4.3",
            }],
        }, {
            "Id":
            6666,
            "Identifier":
            "device6",
            "DeviceServiceTag":
            "device6",
            "DeviceManagement": [{
                "NetworkAddress": "192.168.3.11",
            }],
        }, {
            "Id":
            7777,
            "Identifier":
            "device7",
            "DeviceServiceTag":
            "device7",
            "DeviceManagement": [{
                "NetworkAddress": "192.168.3.0",
            }],
        }, {
            "Id":
            8888,
            "Identifier":
            "device8",
            "DeviceServiceTag":
            "device8",
            "DeviceManagement": [{
                "NetworkAddress": "192.168.4.1",
            }],
        }, {
            "Id":
            9999,
            "Identifier":
            "device9",
            "DeviceServiceTag":
            "device9",
            "DeviceManagement": [{
                "NetworkAddress": "192.168.4.5",
            }],
        }, {
            "Id":
            1010,
            "Identifier":
            "device10",
            "DeviceServiceTag":
            "device10",
            "DeviceManagement": [{
                "NetworkAddress": "192.168.4.9",
            }],
        }, {
            "Id":
            1011,
            "Identifier":
            "device11",
            "DeviceServiceTag":
            "device11",
            "DeviceManagement": [{
                "NetworkAddress": "[fe80::de0:b6b3:a764:0]",
            }],
        }, {
            "Id":
            1012,
            "Identifier":
            "device11",
            "DeviceServiceTag":
            "device11",
            "DeviceManagement": [{
                "NetworkAddress": "[fe90::de0:b6b3:a764:0]",
            }],
        }]
        output = {
            3333: "192.168.2.10",
            4444: "192.168.3.10",
            5555: "192.168.4.3",
            6666: "192.168.3.11",
            7777: "192.168.3.0",
            8888: "192.168.4.1",
            9999: "192.168.4.5",
            1010: "192.168.4.9",
            1011: "fe80::de0:b6b3:a764:0"
        }
        ip_addresses = [
            IPNetwork("::ffff:192.168.2.0/125"),
            IPAddress("192.168.2.10"),
            IPAddress('fe80::ffff:ffff:ffff:ffff'),
            IPNetwork('fe80::ffff:ffff:ffff:ffff/24'),
            IPNetwork('192.168.3.0/24'),
            IPRange('192.168.4.1', '192.168.4.9')
        ]
        f_module = self.get_module_mock(
            params={
                "name":
                "group1",
                "ip_addresses": [
                    "::ffff:192.168.2.0/125", "192.168.2.10",
                    'fe80::ffff:ffff:ffff:ffff', '192.168.3.0/24',
                    '192.168.4.1-192.168.4.9', 'fe80::ffff:ffff:ffff:ffff/24'
                ]
            })
        res = self.module.get_device_id_from_ip(ip_addresses, device_list,
                                                f_module)
        assert res == output
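
    # A hedged sketch of the membership test the success case above relies on:
    # an exact IPAddress compares by equality, while IPNetwork and IPRange
    # both support the `in` operator. The helper name is illustrative.
    @staticmethod
    def _address_in_targets_sketch(addr, targets):
        ip = IPAddress(addr)
        return any(ip == t if isinstance(t, IPAddress) else ip in t
                   for t in targets)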

    def test_get_device_id_from_ip_failure_case(self):
        device_list = [
            {
                "Id": 1111,
                "Identifier": "device1",
                "DeviceServiceTag": "device1",
                "DeviceManagement": [{
                    "NetworkAddress": "192.168.2.255",
                }],
            },
        ]
        ip_addresses = [
            IPNetwork("::ffff:192.168.2.0/125"),
            IPAddress("192.168.2.10"),
            IPAddress('fe80::ffff:ffff:ffff:ffff'),
            IPNetwork('fe80::ffff:ffff:ffff:ffff/24'),
            IPNetwork('192.168.3.0/24'),
            IPRange('192.168.4.1', '192.168.4.9')
        ]
        with pytest.raises(Exception, match=IP_NOT_EXISTS):
            f_module = self.get_module_mock(
                params={
                    "name":
                    "group1",
                    "ip_addresses": [
                        "::ffff:192.168.2.0/125", "192.168.2.10",
                        'fe80::ffff:ffff:ffff:ffff', '192.168.3.0/24',
                        '192.168.4.1-192.168.4.9',
                        'fe80::ffff:ffff:ffff:ffff/24'
                    ]
                })
            self.module.get_device_id_from_ip(ip_addresses, device_list,
                                              f_module)

    # def test_add_member_to_group_case01(self, ome_connection_mock_for_device_group, ome_response_mock):
    #     report_list = [{"Id": 3333, "DeviceServiceTag": "device1",
    #                     "DeviceManagement": [{"NetworkAddress": "192.168.2.10"},
    #                                          ]},
    #                    {"Id": 1013, "DeviceServiceTag": "device1",
    #                     "DeviceManagement": [{"NetworkAddress": "192.168.5.10"},
    #                                          ]}
    #                    ]
    #     ome_connection_mock_for_device_group.get_all_report_details.return_value = {"report_list": report_list}
    #     f_module = self.get_module_mock(params={"name": "group1",
    #                                             "ip_addresses": ["::ffff:192.168.2.0/125",
    #                                                              "192.168.2.10",
    #                                                              'fe80::ffff:ffff:ffff:ffff',
    #                                                              '192.168.3.0/24',
    #                                                              '192.168.4.1-192.168.4.9',
    #                                                              'fe80::ffff:ffff:ffff:ffff/24']})
    #     device_id = {3333: "192.168.2.10", 4444: "192.168.3.10",
    #                  5555: "192.168.4.3",
    #                  1011: "fe80::de0:b6b3:a764:0"}
    #     ome_response_mock.status_code = 204
    #     added_ips_out = ["192.168.3.10", "192.168.4.3", "fe80::de0:b6b3:a764:0"]
    #     resp, added_ips = self.module.add_member_to_group(f_module, ome_connection_mock_for_device_group, 1, device_id,
    #                                                       "IPAddresses")
    #     assert resp.status_code == 204
    #     assert added_ips == added_ips_out

    def test_add_member_to_group_checkmode_case01(
            self, ome_connection_mock_for_device_group, ome_response_mock):
        report_list = [{
            "Id":
            3333,
            "DeviceServiceTag":
            "device1",
            "DeviceManagement": [
                {
                    "NetworkAddress": "192.168.2.10"
                },
            ]
        }, {
            "Id":
            1013,
            "DeviceServiceTag":
            "device1",
            "DeviceManagement": [
                {
                    "NetworkAddress": "192.168.5.10"
                },
            ]
        }]
        ome_connection_mock_for_device_group.get_all_report_details.return_value = {
            "report_list": report_list
        }
        f_module = self.get_module_mock(params={
            "name":
            "group1",
            "ip_addresses": [
                "::ffff:192.168.2.0/125", "192.168.2.10",
                'fe80::ffff:ffff:ffff:ffff', '192.168.3.0/24',
                '192.168.4.1-192.168.4.9', 'fe80::ffff:ffff:ffff:ffff/24'
            ]
        },
                                        check_mode=True)
        device_id = {
            3333: "192.168.2.10",
            4444: "192.168.3.10",
            5555: "192.168.4.3",
            1011: "fe80::de0:b6b3:a764:0"
        }
        with pytest.raises(Exception, match="Changes found to be applied."):
            self.module.add_member_to_group(
                f_module, ome_connection_mock_for_device_group, 1, device_id,
                "IPAddresses")

    def test_add_member_to_group_checkmode_case02(
            self, ome_connection_mock_for_device_group, ome_response_mock):
        report_list = [{
            "Id":
            3333,
            "DeviceServiceTag":
            "device1",
            "DeviceManagement": [
                {
                    "NetworkAddress": "192.168.2.10"
                },
            ]
        }, {
            "Id":
            1013,
            "DeviceServiceTag":
            "device1",
            "DeviceManagement": [
                {
                    "NetworkAddress": "192.168.5.10"
                },
            ]
        }]
        ome_connection_mock_for_device_group.get_all_report_details.return_value = {
            "report_list": report_list
        }
        f_module = self.get_module_mock(params={
            "name": "group1",
            "ip_addresses": ["192.168.2.10"]
        },
                                        check_mode=True)
        device_id = {3333: "192.168.2.10"}
        with pytest.raises(Exception, match="No changes found to be applied."):
            self.module.add_member_to_group(
                f_module, ome_connection_mock_for_device_group, 1, device_id,
                "IPAddresses")

    def test_add_member_to_group_idempotency_case(
            self, ome_connection_mock_for_device_group, ome_response_mock):
        report_list = [{
            "Id":
            3333,
            "DeviceServiceTag":
            "device1",
            "DeviceManagement": [
                {
                    "NetworkAddress": "192.168.2.10"
                },
            ]
        }, {
            "Id":
            1013,
            "DeviceServiceTag":
            "device1",
            "DeviceManagement": [
                {
                    "NetworkAddress": "192.168.5.10"
                },
            ]
        }]
        ome_connection_mock_for_device_group.get_all_report_details.return_value = {
            "report_list": report_list
        }
        f_module = self.get_module_mock(params={
            "name": "group1",
            "ip_addresses": ["192.168.2.10"]
        })
        device_id = {3333: "192.168.2.10"}
        with pytest.raises(Exception) as exc:
            self.module.add_member_to_group(
                f_module, ome_connection_mock_for_device_group, 1, device_id,
                "IPAddresses")

        assert exc.value.args[0] == "No changes found to be applied."

    def test_ome_device_group_main_ip_address_case(
            self, ome_connection_mock_for_device_group, mocker,
            ome_response_mock, ome_default_args):
        ome_default_args.update({
            "name": "Storage Services",
            "ip_addresses": ["192.168.2.10"]
        })
        ome_response_mock.status_code = 204
        ome_response_mock.success = True
        mocker.patch(MODULE_PATH + 'get_group_id', return_value=1)
        mocker.patch(MODULE_PATH + 'get_device_id',
                     return_value=[25011, 25012])
        mocker.patch(MODULE_PATH + 'add_member_to_group',
                     return_value=(ome_response_mock, ["192.168.2.10"]))
        result = self._run_module(ome_default_args)
        assert result[
            'msg'] == "Successfully added member(s) to the device group."
        assert result['ip_addresses_added'] == ["192.168.2.10"]

    def test_get_device_id_ip_address_case(
            self, ome_connection_mock_for_device_group, mocker):
        f_module = self.get_module_mock(params={
            "name": "group1",
            "ip_addresses": ["192.168.2.10"]
        })
        mocker.patch(MODULE_PATH + 'get_all_ips',
                     return_value=[IPAddress("192.168.2.10")])
        mocker.patch(MODULE_PATH + 'get_device_id_from_ip',
                     return_value={1111: "192.168.2.10"})
        each_device_list, key = self.module.get_device_id(
            ome_connection_mock_for_device_group, f_module)
        assert key == "IPAddresses"
        assert each_device_list == {1111: "192.168.2.10"}