Example #1
def _is_ip_address(pattern):
    """
    Check whether *pattern* could be/match an IP address.

    :param pattern: A pattern for a host name.
    :type pattern: `bytes` or `unicode`

    :return: `True` if *pattern* could be an IP address, else `False`.
    :rtype: bool
    """
    if isinstance(pattern, bytes):
        try:
            pattern = pattern.decode("ascii")
        except UnicodeError:
            return False

    try:
        int(pattern)
        return True
    except ValueError:
        pass

    try:
        ipaddress.ip_address(pattern.replace("*", "1"))
    except ValueError:
        return False

    return True
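A minimal usage sketch for the helper above, assuming it sits next to an ``import ipaddress`` statement; the sample patterns are illustrative:

    for pattern in [b"127.0.0.1", u"::1", u"*.0.0.1", u"1337", u"example.com"]:
        print(pattern, _is_ip_address(pattern))
    # the first four print True (the wildcard is probed as "1.0.0.1" and "1337" parses as an int);
    # "example.com" prints False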
Example #2
def isValidIPAddress(ip):
    """Check if ip is a valid IPv4/IPv6 address"""
    try:
        ipaddress.ip_address(ip)
        return True
    except ValueError:
        return False
Example #3
File: vpp_ipsec.py Project: vpp-dev/vpp
 def __init__(self, test, spd, sa_id,
              local_start, local_stop,
              remote_start, remote_stop,
              proto,
              priority=100,
              policy=None,
              is_outbound=1,
              remote_port_start=0,
              remote_port_stop=65535,
              local_port_start=0,
              local_port_stop=65535):
     self.test = test
     self.spd = spd
     self.sa_id = sa_id
     self.local_start = ip_address(text_type(local_start))
     self.local_stop = ip_address(text_type(local_stop))
     self.remote_start = ip_address(text_type(remote_start))
     self.remote_stop = ip_address(text_type(remote_stop))
     self.proto = proto
     self.is_outbound = is_outbound
     self.priority = priority
     if not policy:
         self.policy = (VppEnum.vl_api_ipsec_spd_action_t.
                        IPSEC_API_SPD_ACTION_BYPASS)
     else:
         self.policy = policy
     self.is_ipv6 = (0 if self.local_start.version == 4 else 1)
     self.local_port_start = local_port_start
     self.local_port_stop = local_port_stop
     self.remote_port_start = remote_port_start
     self.remote_port_stop = remote_port_stop
Example #4
    def verify(self, openvpn):
        interface_pattern = '(tap|tun)[0-9]'
        node = ConfigNode('service.openvpn', self.configstore).__getstate__()
        node.update(openvpn)
        
        if not re.search(interface_pattern, node['dev']):
            raise VerifyException(errno.EINVAL,
                                  '{0} Bad interface name. Allowed values tap/tun[0-9].'.format(node['dev']))
	
        if node['server_bridge_extended']:
            try:
                bridge_ip = ipaddress.ip_address(node['server_bridge_ip'])
                netmask = node['server_bridge_netmask']
                ip_range_begin = ipaddress.ip_address(node['server_bridge_range_begin'])
                ip_range_end = ipaddress.ip_address(node['server_bridge_range_end'])
                subnet = ipaddress.ip_network('{0}/{1}'.format(bridge_ip, netmask), strict=False) 
      
            except ValueError as e:
                raise VerifyException(errno.EINVAL, str(e))

            if (ip_range_begin not in subnet) or (ip_range_end not in subnet):
                raise VerifyException(errno.EINVAL,
                                      'Provided range of remote client IP addresses is invalid.')

            if (bridge_ip >= ip_range_begin) and (bridge_ip <= ip_range_end):
                raise VerifyException(errno.EINVAL, 
                                      'Provided bridge IP address is in the client ip range.')

        if (node['keepalive_ping_interval'] * 2) >= node['keepalive_peer_down']:
            raise VerifyException(errno.EINVAL, 'The second parameter to keepalive must be '
                                  'at least twice the value of the first parameter. '
                                  'Recommended setting is keepalive 10 60.')

        return ['system']
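The bridge validation above reduces to a few ipaddress checks; a standalone sketch with made-up values:

    import ipaddress

    bridge_ip = ipaddress.ip_address('192.168.8.1')
    netmask = '255.255.255.0'
    subnet = ipaddress.ip_network('{0}/{1}'.format(bridge_ip, netmask), strict=False)  # 192.168.8.0/24
    ip_range_begin = ipaddress.ip_address('192.168.8.100')
    ip_range_end = ipaddress.ip_address('192.168.8.200')

    assert ip_range_begin in subnet and ip_range_end in subnet   # range lies inside the bridge subnet
    assert not (ip_range_begin <= bridge_ip <= ip_range_end)     # bridge IP is outside the client range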
Example #5
File: _dnsbl.py Project: iecsp/modoboa
 def check_domain(self, domain, timeout):
     """Check specified domain."""
     resolver = dns.resolver.Resolver()
     try:
         answers = resolver.query(domain.name, "MX")
     except dns.resolver.NoAnswer:
         return
     ip_list = []
     for answer in answers:
         address = None
         try:
             ipaddress.ip_address(str(answer.exchange))
         except ValueError:
             try:
                 address = socket.gethostbyname(str(answer.exchange))
             except socket.gaierror:
                 pass
         else:
             address = str(answer.exchange)
         finally:
             if address is not None:
                 ip_list.append(address)
     if len(ip_list) == 0:
         return
     jobs = [
         gevent.spawn(self.query, ip_list, provider)
         for provider in self.providers]
     gevent.joinall(jobs, timeout)
     for job in jobs:
         if not job.successful():
             continue
         provider, results = job.value
         self.store_domain_result(domain, provider, results)
Example #6
def test_serialization(write_client):
    SerializationDoc.init()
    write_client.index(index='test-serialization', doc_type='doc', id=42,
                       body={
                           'i': [1, 2, "3", None],
                           'b': [True, False, "true", "false", None],
                           'd': [0.1, "-0.1", None],
                           "bin": ['SGVsbG8gV29ybGQ=', None],
                           'ip': ['::1', '127.0.0.1', None]
                       })
    sd = SerializationDoc.get(id=42)

    assert sd.i == [1, 2, 3, None]
    assert sd.b == [True, False, True, False, None]
    assert sd.d == [0.1, -0.1, None]
    assert sd.bin == [b'Hello World', None]
    assert sd.ip == [ip_address(u'::1'), ip_address(u'127.0.0.1'), None]

    assert sd.to_dict() == {
        'b': [True, False, True, False, None],
        'bin': ['SGVsbG8gV29ybGQ=', None],
        'd': [0.1, -0.1, None],
        'i': [1, 2, 3, None],
        'ip': ['::1', '127.0.0.1', None]
    }
Example #7
File: vpp_ipsec.py Project: vpp-dev/vpp
    def __init__(self, test, id, spi,
                 integ_alg, integ_key,
                 crypto_alg, crypto_key,
                 proto,
                 tun_src=None, tun_dst=None,
                 flags=None):
        e = VppEnum.vl_api_ipsec_sad_flags_t
        self.test = test
        self.id = id
        self.spi = spi
        self.integ_alg = integ_alg
        self.integ_key = integ_key
        self.crypto_alg = crypto_alg
        self.crypto_key = crypto_key
        self.proto = proto

        self.tun_src = tun_src
        self.tun_dst = tun_dst
        if not flags:
            self.flags = e.IPSEC_API_SAD_FLAG_NONE
        else:
            self.flags = flags
        if (tun_src):
            self.tun_src = ip_address(text_type(tun_src))
            self.flags = self.flags | e.IPSEC_API_SAD_FLAG_IS_TUNNEL
            if (self.tun_src.version == 6):
                self.flags = self.flags | e.IPSEC_API_SAD_FLAG_IS_TUNNEL_V6
        if (tun_dst):
            self.tun_dst = ip_address(text_type(tun_dst))
Example #8
def validate_ip(ip):
    try:
        ipaddress.ip_address(ip)
        return True
    except ValueError:
        pass
    return False
Example #9
	def login(self, user, passwd, peer):
		if user == "root" and config.OpenWebif.no_root_access.value:
			# Override "no root" for logins from local/private networks
			samenet = False
			networks = getAllNetworks()
			if networks:
				for network in networks:
					if ipaddress.ip_address(unicode(peer)) in ipaddress.ip_network(unicode(network), strict=False):
						samenet=True
			if not (ipaddress.ip_address(unicode(peer)).is_private or samenet):
				return False
		from crypt import crypt
		from pwd import getpwnam
		from spwd import getspnam
		cpass = None
		try:
			cpass = getpwnam(user)[1]
		except:
			return False
		if cpass:
			if cpass == 'x' or cpass == '*':
				try:
					cpass = getspnam(user)[1]
				except:
					return False
			return crypt(passwd, cpass) == cpass
		return False
Example #10
def changeHsmInfo():
	global IP
	global PORT

	ipBak = IP
	portBak = PORT

	ip = input('please input your ip:')
	port = input('please input your port:')
	try:
		ipaddress.ip_address(ip)
		IP = ip
	except ValueError:
		print('input ip error')
		return -1
	
	try:
		PORT = int(port)
	except ValueError :
		print('input port error')
		return -1

	
	if ReCreatTcpConntion() < 0:
		print('can\'t connect ip [%s] port [%d]' % (IP,PORT) )
		print('connecting default ip [%s] port [%d]' % (ipBak,portBak))
		IP = ipBak
		PORT = portBak
		return ReCreatTcpConntion()
	else:
		pass

	pass
Example #11
def index():

    if request.method == 'GET':
        return 'OK'

    elif request.method == 'POST':
        # Check if the POST request is from github.com
        for block in hook_blocks:
            ip = ipaddress.ip_address(u'%s' % request.remote_addr)
            if ip in ipaddress.ip_network(block):
                break #the remote_addr is within the network range of github
        else:
            abort(403)

        if request.headers.get('X-GitHub-Event') == "ping":
            return json.dumps({'msg': 'Hi!'})
        if request.headers.get('X-GitHub-Event') != "push":
            return json.dumps({'msg': "wrong event type"})

        payload = json.loads(request.data)
        repo_name = payload['repository']['full_name']
        if repo_name in repos:
            local_path = repos[repo_name]
        else:
            abort(404)

        if not os.path.exists(local_path):
            os.makedirs(local_path, 0755)
            gitClone(local_path, '[email protected]:{0}.git'.format(repo_name))
            return 'Cloned repo'

        gitPull(local_path)

        return 'OK'
Example #12
 def f():
     addr = self.environ.get('REMOTE_ADDR') or self.environ[b'REMOTE_ADDR']
     addr = ip_address(addr.decode('ascii') if type(addr) is bytes else addr)
     trusted_proxies = getattr(self.website, 'trusted_proxies', None)
     forwarded_for = self.headers.get(b'X-Forwarded-For')
     self.__dict__['bypasses_proxy'] = bool(trusted_proxies)
     if not trusted_proxies or not forwarded_for:
         return addr
     for networks in trusted_proxies:
         is_trusted = False
         for network in networks:
             is_trusted = addr.is_private if network == 'private' else addr in network
             if is_trusted:
                 break
         if not is_trusted:
             return addr
         i = forwarded_for.rfind(b',')
         try:
             addr = ip_address(forwarded_for[i+1:].decode('ascii').strip())
         except (UnicodeDecodeError, ValueError):
             return addr
         if i == -1:
             if networks is trusted_proxies[-1]:
                 break
             return addr
         forwarded_for = forwarded_for[:i]
     self.__dict__['bypasses_proxy'] = False
     return addr
Example #13
File: certs.py Project: adolia/mitmproxy
def dummy_cert(privkey, cacert, commonname, sans):
    """
        Generates a dummy certificate.

        privkey: CA private key
        cacert: CA certificate
        commonname: Common name for the generated certificate.
        sans: A list of Subject Alternate Names.

        Returns cert if operation succeeded, None if not.
    """
    ss = []
    for i in sans:
        try:
            ipaddress.ip_address(i.decode("ascii"))
        except ValueError:
            ss.append(b"DNS:%s" % i)
        else:
            ss.append(b"IP:%s" % i)
    ss = b", ".join(ss)

    cert = OpenSSL.crypto.X509()
    cert.gmtime_adj_notBefore(-3600 * 48)
    cert.gmtime_adj_notAfter(DEFAULT_EXP)
    cert.set_issuer(cacert.get_subject())
    if commonname is not None and len(commonname) < 64:
        cert.get_subject().CN = commonname
    cert.set_serial_number(int(time.time() * 10000))
    if ss:
        cert.set_version(2)
        cert.add_extensions(
            [OpenSSL.crypto.X509Extension(b"subjectAltName", False, ss)])
    cert.set_pubkey(cacert.get_pubkey())
    cert.sign(privkey, "sha256")
    return SSLCert(cert)
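The SAN classification loop above can be exercised on its own; a small sketch with illustrative names (assumes Python 3.5+, where ``b"DNS:%s" % i`` works on bytes):

    import ipaddress

    sans = [b"example.com", b"192.0.2.7", b"2001:db8::1"]
    ss = []
    for i in sans:
        try:
            ipaddress.ip_address(i.decode("ascii"))
        except ValueError:
            ss.append(b"DNS:%s" % i)      # not parseable as an address -> DNS entry
        else:
            ss.append(b"IP:%s" % i)       # parses cleanly -> IP entry
    print(b", ".join(ss))                 # b'DNS:example.com, IP:192.0.2.7, IP:2001:db8::1'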
Example #14
 def _rm_host_num_and_add_prefix(self, ip_str, prefix_str):
     # Strips the host number off of the ip address based on the prefix.
     # returns the prefix appended to the network id
     # examples:
     #     add_prefix('1.1.1.1', '255.255.255.252')
     #         return '1.1.1.0/255.255.255.252'
     #     add_prefix('1.1.1.1', '16')
     #         return '1.1.0.0/16'
     #     add_prefix('1.1.1.1', '16')
     #         return '1.1.0.0/16'
     #     add_prefix('2001:db8:3c4d:15:abcd:1234:5678:9abc', '65')
     #        return '2001:db8:3c4d:15:8000::/65'
     #     add_prefix('2001:db8:3c4d:15:abcd:1234:5678:9abc', 'FFFF:FFFF::')
     #        return '2001:db8::/FFFF:FFFF::'
     assert isinstance(ip_str, str)
     assert isinstance(prefix_str, str)
     try:
         prefix_val = int(ip_address(prefix_str))
     except ValueError:
         if ip_address(ip_str).version == 4:
             prefix_val = (0xFFFFFFFF << (32 - int(prefix_str)))
         else:
             prefix_val = ((2**128 - 1) << (128 - int(prefix_str)))
     ip_val = int(ip_address(ip_str)) & prefix_val
     if ip_address(ip_str).version == 4:
         ip_str = str(IPv4Address(ip_val))
     else:
         ip_str = str(IPv6Address(ip_val))
     return (ip_str + '/' + prefix_str)
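For the prefix-length branch above, the core arithmetic can be checked standalone, reproducing the documented '1.1.1.1' / '16' case:

    from ipaddress import IPv4Address, ip_address

    ip_str, prefix_str = '1.1.1.1', '16'
    prefix_val = (0xFFFFFFFF << (32 - int(prefix_str))) & 0xFFFFFFFF   # 0xFFFF0000
    network_id = IPv4Address(int(ip_address(ip_str)) & prefix_val)
    print('{0}/{1}'.format(network_id, prefix_str))                    # 1.1.0.0/16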
Example #15
def _is_url_naive(urlstr):
    """Naive check if given URL is really a URL.

    Args:
        urlstr: The URL to check for, as string.

    Return:
        True if the URL really is a URL, False otherwise.
    """
    url = qurl_from_user_input(urlstr)
    try:
        ipaddress.ip_address(urlstr)
    except ValueError:
        pass
    else:
        # Valid IPv4/IPv6 address
        return True

    # Qt treats things like "23.42" or "1337" or "0xDEAD" as valid URLs
    # which we don't want to. Note we already filtered *real* valid IPs
    # above.
    if not QHostAddress(urlstr).isNull():
        return False

    if not url.isValid():
        return False
    elif '.' in url.host():
        return True
    elif url.host() == 'localhost':
        return True
    else:
        return False
Example #16
File: models.py Project: erudit/eruditorg
 def ip_addresses(self):
     """ Returns the list of IP addresses contained in the current range. """
     start = ipaddress.ip_address(self.ip_start)
     end = ipaddress.ip_address(self.ip_end)
     return reduce(
         lambda ips, ipn: ips + list(ipn),
         ipaddress.summarize_address_range(start, end), [])
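A small sketch of the same reduce over ``summarize_address_range``, with an illustrative range:

    import ipaddress
    from functools import reduce

    start = ipaddress.ip_address('192.0.2.8')
    end = ipaddress.ip_address('192.0.2.11')
    ips = reduce(lambda ips, ipn: ips + list(ipn),
                 ipaddress.summarize_address_range(start, end), [])
    print(ips)   # [IPv4Address('192.0.2.8'), IPv4Address('192.0.2.9'),
                 #  IPv4Address('192.0.2.10'), IPv4Address('192.0.2.11')]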
Example #17
def GetSrcIPAddr(AF):

    if not AF:
        return None

    AddrList = []

    for interface in netifaces.interfaces():
        if netifaces.ifaddresses(interface):
            if AF in netifaces.ifaddresses(interface):
                for ipaddr in netifaces.ifaddresses(interface)[AF]:
                    CurrentIPitem = ipaddress.ip_address(ipaddr['addr'].rsplit('%',1)[0])

                    if not CurrentIPitem.is_loopback and \
                        not CurrentIPitem.is_link_local and \
                        not CurrentIPitem.is_multicast and \
                        not CurrentIPitem.is_unspecified and \
                        not CurrentIPitem.is_reserved:
                        AddrList.append(ipaddr['addr'])


    if AddrList:
        return ipaddress.ip_address(AddrList[0]) #Just Return the first Usable IP

    else:
        return None
Example #18
async def test_bypass_login_flow(manager_bypass_login, provider_bypass_login):
    """Test login flow can be bypass if only one user available."""
    owner = await manager_bypass_login.async_create_user("test-owner")

    # not from trusted network
    flow = await provider_bypass_login.async_login_flow(
        {'ip_address': ip_address('127.0.0.1')})
    step = await flow.async_step_init()
    assert step['type'] == 'abort'
    assert step['reason'] == 'not_whitelisted'

    # from trusted network, only one available user, bypass the login flow
    flow = await provider_bypass_login.async_login_flow(
        {'ip_address': ip_address('192.168.0.1')})
    step = await flow.async_step_init()
    assert step['type'] == 'create_entry'
    assert step['data']['user'] == owner.id

    user = await manager_bypass_login.async_create_user("test-user")

    # from trusted network, two available user, show up login form
    flow = await provider_bypass_login.async_login_flow(
        {'ip_address': ip_address('192.168.0.1')})
    step = await flow.async_step_init()
    schema = step['data_schema']
    # both owner and user listed
    assert schema({'user': owner.id})
    assert schema({'user': user.id})
Example #19
File: expert.py Project: pkug/intelmq
    def process(self):
        event = self.receive_message()

        ipfields = [field for field in self.ipfields[self.parameters.srcdest] \
                    if event.contains(field)]
        asnfields = [field for field in self.asnfields[self.parameters.srcdest] \
                     if event.contains(field)]

        # Skip blacklisted IPs or ranges
        for iprange in self.parameters.ipexcept:
            for ipf in (event.value(ipfield) for ipfield in ipfields):
                if ip_address(ipf) in ip_network(iprange):
                    self.logger.debug("Blacklisted IP %s, ignoring", ipf)
                    self.acknowledge_message()
                    return

        # ASNs
        for asn in self.parameters.asns:
            for asnf in (event.value(asnfield) for asnfield in asnfields):
                if str(asnf) == asn:
                    self.logger.debug("ASN %s matched", asnf)
                    self.send_message(event)
                    self.acknowledge_message()
                    return

        # IPs
        for iprange in self.parameters.ipranges:
            for ipf in (event.value(ipfield) for ipfield in ipfields):
                if ip_address(ipf) in ip_network(iprange):
                    self.logger.debug("IP %s matched", ipf)
                    self.send_message(event)
                    self.acknowledge_message()
                    return

        self.acknowledge_message()
Example #20
File: models.py Project: ebhoren/eruditorg
 def clean(self):
     super(InstitutionIPAddressRange, self).clean()
     start = ipaddress.ip_address(self.ip_start)
     end = ipaddress.ip_address(self.ip_end)
     if start > end:
         raise ValidationError(_(
             'L\'adresse IP de début doit être inférieure à l\'adresse IP de fin'))
Example #21
async def test_login_flow(manager, provider):
    """Test login flow."""
    owner = await manager.async_create_user("test-owner")
    user = await manager.async_create_user("test-user")

    # not from trusted network
    flow = await provider.async_login_flow(
        {'ip_address': ip_address('127.0.0.1')})
    step = await flow.async_step_init()
    assert step['type'] == 'abort'
    assert step['reason'] == 'not_whitelisted'

    # from trusted network, list users
    flow = await provider.async_login_flow(
        {'ip_address': ip_address('192.168.0.1')})
    step = await flow.async_step_init()
    assert step['step_id'] == 'init'

    schema = step['data_schema']
    assert schema({'user': owner.id})
    with pytest.raises(vol.Invalid):
        assert schema({'user': '******'})

    # login with valid user
    step = await flow.async_step_init({'user': user.id})
    assert step['type'] == 'create_entry'
    assert step['data']['user'] == user.id
Example #22
File: models.py Project: itnihao/pytin
    def is_valid_address(address):
        try:
            ipaddress.ip_address(unicode(address))
        except:
            return False

        return True
Example #23
def growSub(br,g='/24'):
	if br is None:
		return None
	ll=len(br)
	brr=[['0.0.0.0','0.0.0.0']]*ll
	for i,w in enumerate(br):
		temp=list(w)
		pref,l=commonPrefix(w)
		if l <= int(g.strip('/')):
			brr[i]=w
		else:
			subg=subnet(pref)
			while (l > int(g.strip('/'))):
				subg=subg.grow()
				l=l-1
				net=ix.ip_network(unicode(subg.string()))
				if (ll==1):
					temp=[subg.first(),subg.last()]
				elif (i==0 and (ix.ip_address(unicode(ipClass(br[i+1][0]).first(g))) not in net)):
					temp=[subg.first(),subg.last()]
				elif(i==(ll-1) and (ix.ip_address(unicode(ipClass(br[i-1][1]).last(g))) not in net)):
					temp=[subg.first(),subg.last()]
				elif ((ix.ip_address(unicode(ipClass(br[i-1][1]).last(g))) not in net) and (ix.ip_address(unicode(ipClass(br[i+1][0]).first(g))) not in net)):
					temp=[subg.first(),subg.last()]
				else : break				
				brr[i]=temp
	return brr
Example #24
File: util.py Project: CivBase/oppy
    def parse(data, offset):
        '''Parse and extract TLVTriple fields from a byte string.

        :param str data: byte string to parse
        :param int offset: offset in str data where we should start
            reading
        :returns: :class:`~oppy.cell.util.TLVTriple`
        '''
        addr_type = struct.unpack('!B', data[offset:offset +
                                             TLV_ADDR_TYPE_LEN])[0]
        offset += TLV_ADDR_TYPE_LEN

        # use addr_len for hostname types
        addr_len  = struct.unpack('!B', data[offset:offset +
                                             TLV_ADDR_LEN_LEN])[0]
        offset += TLV_ADDR_LEN_LEN

        if addr_type == DEF.IPv4_ADDR_TYPE:
            value = data[offset:offset + DEF.IPv4_ADDR_LEN]
            offset += DEF.IPv4_ADDR_LEN
            value = ipaddress.ip_address(value).exploded
        elif addr_type == DEF.IPv6_ADDR_TYPE:
            value = data[offset:offset + DEF.IPv6_ADDR_LEN]
            offset += DEF.IPv6_ADDR_LEN
            value = ipaddress.ip_address(value).exploded
        else:
            msg = "TLVTriple can't parse type {0} yet.".format(addr_type)
            raise ValueError(msg)

        return TLVTriple(value)
Example #25
def testVector(suffle=False):
	g='/20'
	net1=ix.ip_network(unicode('0.0.0.0'+g))
	lIP=list(net1.hosts())
	lIP=[ix.ip_address(unicode(subnet(str(net1)).first()))]+lIP+[ix.ip_address(unicode(subnet(str(net1)).last()))]
	l=len(lIP)
	C=['0.0.0.0/20']*l
	net2=ix.ip_network(u'0.0.4.0/22')
	net3=ix.ip_network(u'0.0.6.0/24')
	net4=ix.ip_network(u'0.0.14.0/23')
	net5=ix.ip_network(u'0.0.8.0/24')
	net6=ix.ip_network(u'0.0.9.0/24')
	for i,w in enumerate(lIP):
		if w in net2:
			C[i]='0.0.4.0/22'
		if w in net3:
			C[i]='0.0.6.0/24'
		if w in net4:
			C[i]='0.0.14.0/23'
		if w in net5:
			C[i]='0.0.8.0/24'
		if w in net6:
			C[i]='0.0.9.0/24'
	if suffle:
		x=range(l)
		shuffle(x)
		E=int((99.0/100.0)*l)
		ind=x[0:E]
		C = del_indices(C,ind)
		lIP= del_indices(lIP,ind)
	dic=list2dic(C,[str(xx) for xx in lIP])
	return dic.values()
Example #26
File: rproxy.py Project: ohyeah521/rproxy
    def do_GET(self):
        noxff = False
        if isinstance(self.path, bytes):
            self.path = self.path.decode('latin1')
        if self.path.lower().startswith('ftp://'):
            return self.send_error(504)
        # transparent proxy
        if self.path.startswith('/') and 'Host' in self.headers:
            self.path = 'http://%s%s' % (self.headers['Host'], self.path)
        if self.path.startswith('/'):
            return self.send_error(403)
        # redirector
        new_url = REDIRECTOR.get(self.path)
        if new_url:
            logging.debug('redirecting to %s' % new_url)
            if new_url.isdigit() and 400 <= int(new_url) < 600:
                return self.send_error(int(new_url))
            elif new_url in conf.parentdict.keys():
                self._proxylist = [new_url]
            elif new_url.lower() == 'noxff':
                noxff = True
            else:
                return self.redirect(new_url)

        if 'Host' not in self.headers:
            self.headers['Host'] = urlparse.urlparse(self.path).netloc

        if 'ss-realip' in self.headers:  # should exist in first request only
            self.ssrealip = self.headers['ss-realip']
        del self.headers['ss-realip']

        if 'ss-client' in self.headers:  # should exist in first request only
            self.ssclient = self.headers['ss-client']
        del self.headers['ss-client']

        self.requesthost = parse_hostport(self.headers['Host'], 80)

        if self._request_localhost(self.requesthost):
            if ip_address(self.client_address[0]).is_loopback and self.requesthost[1] in (conf.listen[1], conf.listen[1] + 1):
                self.send_response(200)
                msg = 'Hello World !'
                self.send_header('Content-type', 'text/html')
                self.send_header('Content-Length', str(len(msg)))
                self.send_header('Connection', 'keep_alive')
                self.end_headers()
                # Send the html message
                self.wfile.write(msg)
                return
            if not ip_address(self.client_address[0]).is_loopback:
                return self.send_error(403)
        self.shortpath = '%s%s' % (self.path.split('?')[0], '?' if len(self.path.split('?')) > 1 else '')

        if conf.xheaders and self.ssrealip:
            ipl = [ip.strip() for ip in self.headers.get('X-Forwarded-For', '').split(',') if ip.strip()]
            ipl.append(self.ssrealip)
            self.headers['X-Forwarded-For'] = ', '.join(ipl)
        if noxff:
            del self.headers['X-Forwarded-For']

        self._do_GET()
Example #27
def ip_range_max(network, exclude):
    """Return tuple of low, high IP address for largest IP address range within
    the given network.

    Accepts a list of IP addresses to exclude.
    """
    if (network.num_addresses <= 2) or (len(exclude) == 0):
        return ip_range(network)

    current = range(0, 0)
    remaining = range(int(network[1]), int(network[-1]))
    excluded = sorted(set(exclude))
    for ex in excluded:
        e = int(ex)
        if e in remaining:
            index = remaining.index(e)
            if index != 0:
                r = remaining[:index]
                if len(r) > len(current):
                    current = r
            index += 1
            if index < len(remaining):
                remaining = remaining[index:]
            else:
                remaining = range(0, 0)
                break

    length = len(current)
    if length < len(remaining):
        current = remaining
    elif length == 0:
        return ip_range(network)

    return ip_address(current[0]), ip_address(current[-1])
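A hedged usage sketch for ``ip_range_max`` above (``ip_range`` itself is not shown here, so the example keeps the exclusion branch in play; addresses are illustrative):

    import ipaddress

    net = ipaddress.ip_network('192.0.2.0/29')
    low, high = ip_range_max(net, [ipaddress.ip_address('192.0.2.3')])
    print(low, high)   # 192.0.2.4 192.0.2.6 -- the largest gap left after excluding .3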
Example #28
def bracket2sub(bracket,a1,b1,g):
	gi=int(g.strip('/'))
	# all IPs should be in form of strings
	out=''
	if a1 is None:
		a=-1
	else:
		a=int(ix.ip_address(unicode(a1)))
	if b1 is None:
		b=2**33
	else:
		b=int(ix.ip_address(unicode(b1)))
	sub,pl=commonPrefix(bracket)
	rootsub=subnet(sub)
	re=ipClass(rootsub.first()).int()
	le=ipClass(rootsub.last()).int()
	root=ix.ip_network(unicode(sub))
	if ((re <= a) or (le >= b)):
		if pl < gi :
			L,R=rootsub.split()
			brL=[L.first(),L.last()]
			brR=[R.first(),R.last()]
			out=(out+' U '+bracket2sub(brL,a1,b1,g)).strip('U ')
			out=(out+' U '+bracket2sub(brR,a1,b1,g)).strip('U ')
		else:
			return out
	else:
		return rootsub.string()
	out=out.strip('U ')
	return out
Example #29
File: utils.py Project: adampav/Ansible
    def validate(raise_f=True, **kwargs):
        needed_args = ["id", "state", "addr"]
        acceptable = {
            "state": ["absent", "present"]
        }

        for arg in needed_args:
            if arg not in kwargs or not kwargs[arg]:
                if raise_f:
                    raise ValueError
                else:
                    return False

        if not isinstance(kwargs["id"], int):
            if raise_f:
                raise TypeError("id is not an Integer")
            else:
                return False

        if kwargs["state"] != "present" and kwargs["state"] != "absent":
            print("\nValue not acceptable!\n"+json.dumps(acceptable["state"], indent=4))
            if raise_f:
                raise ValueError("Invalid state value. Select one from the list.")
            else:
                return False

        try:
            ipaddress.ip_address(kwargs["addr"])
        except ValueError:
            if raise_f:
                ipaddress.ip_address(kwargs["addr"])
            else:
                return False

        return True
Example #30
    def test_should_create_IPv6Header_when_from_bytes_classmethod_is_called(self):
        # GIVEN
        traffic_class = any_traffic_class()
        flow_label = any_flow_label()
        payload_length = any_payload_length()
        next_header = any_next_header()
        hop_limit = any_hop_limit()
        source_address = any_ip_address()
        destination_address = any_ip_address()

        data = bytearray([(6 << 4) | (traffic_class >> 4),
                          (traffic_class & 0xF) << 4 | (flow_label >> 16) & 0xF,
                          (flow_label >> 8) & 0xFF, flow_label & 0xFF,
                          payload_length >> 8, payload_length & 0xFF,
                          next_header, hop_limit])
        data += ip_address(bytes(source_address)).packed + ip_address(bytes(destination_address)).packed

        # WHEN
        ipv6_header = IPv6Header.from_bytes(io.BytesIO(data))

        # THEN
        self.assertEqual(6, ipv6_header.version)
        self.assertEqual(traffic_class, ipv6_header.traffic_class)
        self.assertEqual(flow_label, ipv6_header.flow_label)
        self.assertEqual(payload_length, ipv6_header.payload_length)
        self.assertEqual(next_header, ipv6_header.next_header)
        self.assertEqual(hop_limit, ipv6_header.hop_limit)
        self.assertEqual(source_address, ipv6_header.source_address.packed)
        self.assertEqual(destination_address, ipv6_header.destination_address.packed)
Example #31
def _decode_general_name(backend, gn):
    if gn.type == backend._lib.GEN_DNS:
        data = _asn1_string_to_bytes(backend, gn.d.dNSName)
        if not data:
            decoded = u""
        elif data.startswith(b"*."):
            # This is a wildcard name. We need to remove the leading wildcard,
            # IDNA decode, then re-add the wildcard. Wildcard characters should
            # always be left-most (RFC 2595 section 2.4).
            decoded = u"*." + idna.decode(data[2:])
        else:
            # Not a wildcard, decode away. If the string has a * in it anywhere
            # invalid this will raise an InvalidCodePoint
            decoded = idna.decode(data)
            if data.startswith(b"."):
                # idna strips leading periods. Name constraints can have that
                # so we need to re-add it. Sigh.
                decoded = u"." + decoded

        return x509.DNSName(decoded)
    elif gn.type == backend._lib.GEN_URI:
        data = _asn1_string_to_ascii(backend, gn.d.uniformResourceIdentifier)
        parsed = urllib_parse.urlparse(data)
        if parsed.hostname:
            hostname = idna.decode(parsed.hostname)
        else:
            hostname = ""
        if parsed.port:
            netloc = hostname + u":" + six.text_type(parsed.port)
        else:
            netloc = hostname

        # Note that building a URL in this fashion means it should be
        # semantically indistinguishable from the original but is not
        # guaranteed to be exactly the same.
        uri = urllib_parse.urlunparse(
            (parsed.scheme, netloc, parsed.path, parsed.params, parsed.query,
             parsed.fragment))
        return x509.UniformResourceIdentifier(uri)
    elif gn.type == backend._lib.GEN_RID:
        oid = _obj2txt(backend, gn.d.registeredID)
        return x509.RegisteredID(x509.ObjectIdentifier(oid))
    elif gn.type == backend._lib.GEN_IPADD:
        data = _asn1_string_to_bytes(backend, gn.d.iPAddress)
        data_len = len(data)
        if data_len == 8 or data_len == 32:
            # This is an IPv4 or IPv6 Network and not a single IP. This
            # type of data appears in Name Constraints. Unfortunately,
            # ipaddress doesn't support packed bytes + netmask. Additionally,
            # IPv6Network can only handle CIDR rather than the full 16 byte
            # netmask. To handle this we convert the netmask to integer, then
            # find the first 0 bit, which will be the prefix. If another 1
            # bit is present after that the netmask is invalid.
            base = ipaddress.ip_address(data[:data_len // 2])
            netmask = ipaddress.ip_address(data[data_len // 2:])
            bits = bin(int(netmask))[2:]
            prefix = bits.find('0')
            # If no 0 bits are found it is a /32 or /128
            if prefix == -1:
                prefix = len(bits)

            if "1" in bits[prefix:]:
                raise ValueError("Invalid netmask")

            ip = ipaddress.ip_network(base.exploded + u"/{0}".format(prefix))
        else:
            ip = ipaddress.ip_address(data)

        return x509.IPAddress(ip)
    elif gn.type == backend._lib.GEN_DIRNAME:
        return x509.DirectoryName(
            _decode_x509_name(backend, gn.d.directoryName))
    elif gn.type == backend._lib.GEN_EMAIL:
        data = _asn1_string_to_ascii(backend, gn.d.rfc822Name)
        name, address = parseaddr(data)
        parts = address.split(u"@")
        if name or not address:
            # parseaddr has found a name (e.g. Name <email>) or the entire
            # value is an empty string.
            raise ValueError("Invalid rfc822name value")
        elif len(parts) == 1:
            # Single label email name. This is valid for local delivery. No
            # IDNA decoding can be done since there is no domain component.
            return x509.RFC822Name(address)
        else:
            # A normal email of the form [email protected]. Let's attempt to
            # decode the domain component and return the entire address.
            return x509.RFC822Name(parts[0] + u"@" + idna.decode(parts[1]))
    elif gn.type == backend._lib.GEN_OTHERNAME:
        type_id = _obj2txt(backend, gn.d.otherName.type_id)
        value = _asn1_to_der(backend, gn.d.otherName.value)
        return x509.OtherName(x509.ObjectIdentifier(type_id), value)
    else:
        # x400Address or ediPartyName
        raise x509.UnsupportedGeneralNameType(
            "{0} is not a supported type".format(
                x509._GENERAL_NAMES.get(gn.type, gn.type)), gn.type)
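The netmask-to-prefix trick described in the comments above, sketched standalone with an illustrative 8-byte (IPv4 base + mask) value:

    import ipaddress

    data = bytes([192, 0, 2, 0]) + bytes([255, 255, 255, 0])   # base address + netmask
    base = ipaddress.ip_address(data[:len(data) // 2])
    netmask = ipaddress.ip_address(data[len(data) // 2:])
    bits = bin(int(netmask))[2:]
    prefix = bits.find('0')
    if prefix == -1:            # all ones -> /32 (or /128 for IPv6)
        prefix = len(bits)
    assert '1' not in bits[prefix:], 'Invalid netmask'
    print(ipaddress.ip_network(base.exploded + u'/{0}'.format(prefix)))   # 192.0.2.0/24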
Example #32
 def isValidIp(self, observable):
     try:
         ipaddress.ip_address(observable)
         return True
     except ValueError:
         return False
Example #33
File: factory.py Project: mask-pp/eggroll
def wrap_host_scheme(host):
    try:
        ip = ipaddress.ip_address(host)
        return f'ipv{ip.version}:{host}'
    except ValueError as e:
        return host
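Usage of the wrapper above (assuming ``import ipaddress`` in the module), showing the three outcomes:

    print(wrap_host_scheme('10.0.0.1'))      # ipv4:10.0.0.1
    print(wrap_host_scheme('::1'))           # ipv6:::1
    print(wrap_host_scheme('example.com'))   # example.com (not an address, returned unchanged)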
Example #34
	def __init__(self, jail, name, remote):
		super(RemoteAction, self).__init__(jail, name)
		self.remote = ipaddress.ip_address(remote)
Example #35
    def load_config(self, directory, bitmap_name, config_name):
        ''' Initializes all values from the configuration file
          directory   - path to directory with bitmaps and configuration
          bitmap_name - bitmap filename (excluding suffixes)
          config_name - name of the configuration file (including suffixes)
      '''

        self.config_name = config_name + '.yaml'
        self.bitmap_filename = bitmap_name
        self.directory = directory

        # Read configuration file
        config_file = self.validate_config(self.directory, self.config_name,
                                           'online')

        if config_file is None:
            sys.exit(1)

        # Get time window and interval
        self.time_window = int(
            config_file[self.bitmap_filename]['time']['window'])
        self.time_granularity = int(
            config_file[self.bitmap_filename]['time']['granularity'])
        self.time_first = datetime.datetime.strptime(
            str(config_file[self.bitmap_filename]['time']['first']),
            self.time_format)
        self.intervals = int(
            config_file[self.bitmap_filename]['time']['intervals'])

        # Check mode
        if (('end' in config_file[self.bitmap_filename]['module'])
                and ('last' in config_file[self.bitmap_filename]['time'])):
            self.mode = 'offline'
            if config_file[
                    self.bitmap_filename]['time']['last'] == 'undefined':
                print(
                    'Last record time is undefined > less than one flow recorded.',
                    file=sys.stderr)
                sys.exit(1)
            self.time_last = datetime.datetime.strptime(
                str(config_file[self.bitmap_filename]['time']['last']),
                self.time_format)
        elif (('end' in config_file[self.bitmap_filename]['module'])
              or ('last' in config_file[self.bitmap_filename]['time'])):
            print('Configuration file structure is invalid.', file=sys.stderr)
            sys.exit(1)
        else:
            # Calculate intervals from elapsed time and interval length
            # Last time is current time
            self.mode = 'online'
            length = (self.time_window if
                      (self.intervals > self.time_window) else self.intervals)
            self.time_last = self.time_first + datetime.timedelta(
                seconds=length * self.time_granularity)

        # Get IPs
        if sys.version_info[0] == 2:
            self.first_ip = ipaddress.ip_address(
                unicode(
                    config_file[self.bitmap_filename]['addresses']['first'],
                    "utf-8"))
            self.last_ip = ipaddress.ip_address(
                unicode(config_file[self.bitmap_filename]['addresses']['last'],
                        "utf-8"))

        else:
            self.first_ip = ipaddress.ip_address(
                config_file[self.bitmap_filename]['addresses']['first'])

            self.last_ip = ipaddress.ip_address(
                config_file[self.bitmap_filename]['addresses']['last'])

        self.ip_granularity = int(
            config_file[self.bitmap_filename]['addresses']['granularity'])
        self.ip_size = self.first_ip.max_prefixlen

        self.bit_vector_size = self.get_index_from_ip(str(self.first_ip),
                                                      str(self.last_ip),
                                                      self.ip_granularity)
        self.byte_vector_size = int(math.ceil(self.bit_vector_size / 8))
        return
Example #36
 def _is_ipv4_address(ip_addr):
     return ipaddress.ip_address(ip_addr).version == 4
Example #37
async def connect_tcp(remote_host,
                      remote_port,
                      *,
                      local_host=None,
                      tls=False,
                      ssl_context=None,
                      tls_standard_compatible=True,
                      tls_hostname=None,
                      happy_eyeballs_delay=0.25):
    """
    Connect to a host using the TCP protocol.

    This function implements the stateless version of the Happy Eyeballs algorithm (RFC 6555).
    If ``remote_host`` is a host name that resolves to multiple IP addresses, each one is tried
    until one connection attempt succeeds. If the first attempt does not connect within 250
    milliseconds, a second attempt is started using the next address in the list, and so on.
    On IPv6 enabled systems, an IPv6 address (if available) is tried first.

    When the connection has been established, a TLS handshake will be done if either
    ``ssl_context`` or ``tls_hostname`` is not ``None``, or if ``tls`` is ``True``.

    :param remote_host: the IP address or host name to connect to
    :param remote_port: port on the target host to connect to
    :param local_host: the interface address or name to bind the socket to before connecting
    :param tls: ``True`` to do a TLS handshake with the connected stream and return a
        :class:`~anyio.streams.tls.TLSStream` instead
    :param ssl_context: the SSL context object to use (if omitted, a default context is created)
    :param tls_standard_compatible: If ``True``, performs the TLS shutdown handshake before closing
        the stream and requires that the server does this as well. Otherwise,
        :exc:`~ssl.SSLEOFError` may be raised during reads from the stream.
        Some protocols, such as HTTP, require this option to be ``False``.
        See :meth:`~ssl.SSLContext.wrap_socket` for details.
    :param tls_hostname: host name to check the server certificate against (defaults to the value
        of ``remote_host``)
    :param happy_eyeballs_delay: delay (in seconds) before starting the next connection attempt
    :return: a socket stream object if no TLS handshake was done, otherwise a TLS stream
    :raises OSError: if the connection attempt fails

    """
    # Placed here due to https://github.com/python/mypy/issues/7057
    connected_stream: Optional[SocketStream] = None

    async def try_connect(remote_host: str, event: Event):
        nonlocal connected_stream
        try:
            stream = await asynclib.connect_tcp(remote_host, remote_port,
                                                local_address)
        except OSError as exc:
            oserrors.append(exc)
            return
        else:
            if connected_stream is None:
                connected_stream = stream
                await tg.cancel_scope.cancel()
            else:
                await stream.aclose()
        finally:
            await event.set()

    asynclib = get_asynclib()
    local_address: Optional[IPSockAddrType] = None
    family = socket.AF_UNSPEC
    if local_host:
        gai_res = await getaddrinfo(str(local_host), None)
        family, *_, local_address = gai_res[0]

    target_host = str(remote_host)
    try:
        addr_obj = ip_address(remote_host)
    except ValueError:
        # getaddrinfo() will raise an exception if name resolution fails
        gai_res = await getaddrinfo(target_host,
                                    remote_port,
                                    family=family,
                                    type=socket.SOCK_STREAM)

        # Organize the list so that the first address is an IPv6 address (if available) and the
        # second one is an IPv4 addresses. The rest can be in whatever order.
        v6_found = v4_found = False
        target_addrs: List[Tuple[socket.AddressFamily, str]] = []
        for af, *rest, sa in gai_res:
            if af == socket.AF_INET6 and not v6_found:
                v6_found = True
                target_addrs.insert(0, (af, sa[0]))
            elif af == socket.AF_INET and not v4_found and v6_found:
                v4_found = True
                target_addrs.insert(1, (af, sa[0]))
            else:
                target_addrs.append((af, sa[0]))
    else:
        if isinstance(addr_obj, IPv6Address):
            target_addrs = [(socket.AF_INET6, addr_obj.compressed)]
        else:
            target_addrs = [(socket.AF_INET, addr_obj.compressed)]

    oserrors: List[OSError] = []
    async with create_task_group() as tg:
        for i, (af, addr) in enumerate(target_addrs):
            event = create_event()
            await tg.spawn(try_connect, addr, event)
            async with move_on_after(happy_eyeballs_delay):
                await event.wait()

    if connected_stream is None:
        cause = oserrors[0] if len(oserrors) == 1 else asynclib.ExceptionGroup(
            oserrors)
        raise OSError('All connection attempts failed') from cause

    if tls or tls_hostname or ssl_context:
        try:
            return await TLSStream.wrap(
                connected_stream,
                server_side=False,
                hostname=tls_hostname or remote_host,
                ssl_context=ssl_context,
                standard_compatible=tls_standard_compatible)
        except BaseException:
            await aclose_forcefully(connected_stream)
            raise

    return connected_stream
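A hedged usage sketch of the coroutine above; it assumes the function is exposed as ``anyio.connect_tcp`` (the snippet appears to come from the anyio library) and that the illustrative host is reachable:

    import anyio

    async def main():
        stream = await anyio.connect_tcp('example.com', 80)
        await stream.send(b'HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n')
        print(await stream.receive())
        await stream.aclose()

    anyio.run(main)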
Example #38
def generate_log(log_name):
    global DATE
    date = time.strftime('%Y-%m-%d')
    if date != DATE:
        archive(DATE)
        DATE = date
    INFO = os.path.join(LOGS_PATH, 'info', f'{DATE}.log')

    log_stem = log_name
    log_root = os.path.join(LOGS_PATH, log_name)
    log_uuid = re.match(
        r'.*?-(?P<uuid>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})',
        log_stem, re.IGNORECASE).group('uuid')

    log_file = os.path.join(log_root, 'files.log')
    if not os.path.isfile(log_file):
        return

    LOG_FILE = parse(log_file)
    LOG_CONN = parse(os.path.join(log_root, 'conn.log'))
    for line in LOG_FILE.context.itertuples():
        if is_nan(getattr(line, 'extracted', None)):
            continue
        hosts = [
            dict(tx=ipaddress.ip_address(tx), rx=ipaddress.ip_address(rx))
            for (tx, rx) in zip(line.tx_hosts, line.rx_hosts)
        ]

        conns = list()
        is_orig = line.is_orig
        for conn_uid in line.conn_uids:
            record = next(
                LOG_CONN.context[lambda df: df.uid == conn_uid].iterrows())[1]  # pylint: disable=cell-var-from-loop
            if is_orig:
                conn = dict(
                    src_h=ipaddress.ip_address(record['id.orig_h']),
                    src_p=int(record['id.orig_p']),
                    dst_h=ipaddress.ip_address(record['id.resp_h']),
                    dst_p=int(record['id.resp_p']),
                )
            else:
                conn = dict(
                    src_h=ipaddress.ip_address(record['id.resp_h']),
                    src_p=int(record['id.resp_p']),
                    dst_h=ipaddress.ip_address(record['id.orig_h']),
                    dst_p=int(record['id.orig_p']),
                )
            conns.append(conn)

        local_name = line.extracted
        mime_type = None
        dump_path = os.path.join(DUMP_PATH, local_name)
        if os.path.exists(dump_path):
            with contextlib.suppress(Exception):
                mime_type = magic.detect_from_filename(dump_path).mime_type
            # if mime_type is None or MIME_REGEX.match(mime_type) is None:
            #     if MIME_MODE:
            #         local_name = rename_dump(local_name, line.mime_type)
            # else:
            #     if MIME_MODE or (mime_type != line.mime_type):  # pylint: disable=else-if-used
            #         local_name = rename_dump(local_name, mime_type)
        else:
            dump_path = None

        info = dict(timestamp=line.ts
                    if LOG_FILE.format == 'json' else line.ts.timestamp(),
                    log_uuid=log_uuid,
                    log_path=log_root,
                    log_name=log_stem,
                    dump_path=dump_path,
                    local_name=local_name,
                    source_name=getattr(line, 'filename', None),
                    hosts=hosts,
                    conns=conns,
                    bro_mime_type=line.mime_type,
                    real_mime_type=mime_type,
                    hash=dict(
                        md5=getattr(line, 'md5', None),
                        sha1=getattr(line, 'sha1', None),
                        sha256=getattr(line, 'sha256', None),
                    ))
        print_file(json.dumps(info, cls=IPAddressJSONEncoder), file=INFO)
Example #39
    def is_secure_origin(self, location):
        # type: (Link) -> bool
        # Determine if this url used a secure transport mechanism
        parsed = urllib.parse.urlparse(str(location))
        origin_protocol, origin_host, origin_port = (
            parsed.scheme,
            parsed.hostname,
            parsed.port,
        )

        # The protocol to use to see if the protocol matches.
        # Don't count the repository type as part of the protocol: in
        # cases such as "git+ssh", only use "ssh". (I.e., Only verify against
        # the last scheme.)
        origin_protocol = origin_protocol.rsplit('+', 1)[-1]

        # Determine if our origin is a secure origin by looking through our
        # hardcoded list of secure origins, as well as any additional ones
        # configured on this PackageFinder instance.
        for secure_origin in self.iter_secure_origins():
            secure_protocol, secure_host, secure_port = secure_origin
            if origin_protocol != secure_protocol and secure_protocol != "*":
                continue

            try:
                addr = ipaddress.ip_address(None if origin_host is None else
                                            six.ensure_text(origin_host))
                network = ipaddress.ip_network(six.ensure_text(secure_host))
            except ValueError:
                # We don't have both a valid address or a valid network, so
                # we'll check this origin against hostnames.
                if (origin_host and origin_host.lower() != secure_host.lower()
                        and secure_host != "*"):
                    continue
            else:
                # We have a valid address and network, so see if the address
                # is contained within the network.
                if addr not in network:
                    continue

            # Check to see if the port matches.
            if (origin_port != secure_port and secure_port != "*"
                    and secure_port is not None):
                continue

            # If we've gotten here, then this origin matches the current
            # secure origin and we should return True
            return True

        # If we've gotten to this point, then the origin isn't secure and we
        # will not accept it as a valid location to search. We will however
        # log a warning that we are ignoring it.
        logger.warning(
            "The repository located at %s is not a trusted or secure host and "
            "is being ignored. If this repository is available via HTTPS we "
            "recommend you use HTTPS instead, otherwise you may silence "
            "this warning and allow it anyway with '--trusted-host %s'.",
            origin_host,
            origin_host,
        )

        return False
Example #40
def validate_url(url, ip_whitelist):
    # If it doesn't look like a URL, ignore it.
    if not (url.lstrip().startswith('http://')
            or url.lstrip().startswith('https://')):
        return url

    # Extract hostname component
    parsed_url = urlparse(url).netloc
    # If credentials are in this URL, we need to strip those.
    if parsed_url.count('@') > 0:
        # credentials.
        parsed_url = parsed_url[parsed_url.rindex('@') + 1:]
    # Percent encoded colons and other characters will not be resolved as such
    # so we don't have to either.

    # Sometimes the netloc will contain the port which is not desired, so we
    # need to extract that.
    port = None
    # However, it could ALSO be an IPv6 address they've supplied.
    if ':' in parsed_url:
        # IPv6 addresses have colons in them already (it seems like always more than two)
        if parsed_url.count(':') >= 2:
            # Since IPv6 already use colons extensively, they wrap it in
            # brackets when there is a port, e.g. http://[2001:db8:1f70::999:de8:7648:6e8]:100/
            # However if it ends with a ']' then there is no port after it and
            # they've wrapped it in brackets just for fun.
            if ']' in parsed_url and not parsed_url.endswith(']'):
                # If this +1 throws a range error, we don't care, their url
                # shouldn't end with a colon.
                idx = parsed_url.rindex(':')
                # We parse as an int and let this fail ungracefully if parsing
                # fails because we desire to fail closed rather than open.
                port = int(parsed_url[idx + 1:])
                parsed_url = parsed_url[:idx]
            else:
                # Plain ipv6 without port
                pass
        else:
            # This should finally be ipv4 with port. It cannot be IPv6 as that
            # was caught by earlier cases, and it cannot be due to credentials.
            idx = parsed_url.rindex(':')
            port = int(parsed_url[idx + 1:])
            parsed_url = parsed_url[:idx]

    # safe to log out, no credentials/request path, just an IP + port
    log.debug("parsed url, port: %s : %s", parsed_url, port)
    # Call getaddrinfo to resolve hostname into tuples containing IPs.
    addrinfo = socket.getaddrinfo(parsed_url, port)
    # Get the IP addresses that this entry resolves to (uniquely)
    # We drop:
    #   AF_* family: It will resolve to AF_INET or AF_INET6, getaddrinfo(3) doesn't even mention AF_UNIX,
    #   socktype: We don't care if a stream/dgram/raw protocol
    #   protocol: we don't care if it is tcp or udp.
    addrinfo_results = set([info[4][0] for info in addrinfo])
    # There may be multiple (e.g. IPv4 + IPv6 or DNS round robin). Any one of these
    # could resolve to a local addresses (and could be returned by chance),
    # therefore we must check them all.
    for raw_ip in addrinfo_results:
        # Convert to an IP object so we can tell if it is in private space.
        ip = ipaddress.ip_address(unicodify(raw_ip))
        # If this is a private address
        if ip.is_private:
            results = []
            # If this IP is not anywhere in the whitelist
            for whitelisted in ip_whitelist:
                # If it's an IP address range (rather than a single one...)
                if hasattr(whitelisted, 'subnets'):
                    results.append(ip in whitelisted)
                else:
                    results.append(ip == whitelisted)

            if any(results):
                # If we had any True, then THIS (and ONLY THIS) IP address that
                # that specific DNS entry resolved to is in whitelisted and
                # safe to access. But we cannot exit here, we must ensure that
                # all IPs that that DNS entry resolves to are likewise safe.
                pass
            else:
                # Otherwise, we deny access.
                raise ConfigDoesNotAllowException(
                    "Access to this address in not permitted by server configuration"
                )
    return url
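A hedged usage sketch for ``validate_url`` above, assuming its Galaxy helpers (``unicodify``, ``ConfigDoesNotAllowException``) are importable; the addresses are illustrative:

    import ipaddress

    whitelist = [ipaddress.ip_network(u'127.0.0.0/8')]
    # private address, but whitelisted -> returned unchanged
    print(validate_url('http://127.0.0.1:8080/data.csv', whitelist))
    # not http(s) -> returned unchanged without any lookup
    print(validate_url('ftp://127.0.0.1/data.csv', whitelist))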
print ("")
#Technicians start counting at 1, computer scientists at 0
IP_ADDRESS_COLUMN = IP_ADDRESS_COLUMN - 1
#take the ip-address and start threads

#Threadpool config
CONFIG_PARAMS_LIST = []
threads = ThreadPool( THREAD_NUMBER )

#Start Action
starting_time = time()
try:
    for row in IP_ADDRESS_LIST:
        IP_ADDRESS = str.strip(str(row[IP_ADDRESS_COLUMN]))
        try:
            print ('Creating thread for:', ipaddress.ip_address(IP_ADDRESS))
            CONFIG_PARAMS_LIST.append( ( IP_ADDRESS, USERNAME, PASSWORD, FIRMEWARE_LIST, SCP_IP, SCP_USERNAME, SCP_PASSWORD ) )

        except:
            print ("**** Error: no IP-address: " + IP_ADDRESS + " ****")
except:
    print ("**** Error: seperator ****")

print ("\n--- Creating threadpool and launching ----\n")
results = threads.map( config_worker, CONFIG_PARAMS_LIST)

threads.close()
threads.join()

print ("\n---- End threadpool, elapsed time= " + str(round(time()-starting_time)) + "sec ----")
Example #42
import socket
from subprocess import Popen, PIPE
wb = openpyxl.load_workbook(
    r"C:\Users\RamuGajula\Desktop\GF-V2\IP Inventory v1.0.xlsx")
ipinventory = wb['ARP Table']
#for i in range(ipinventory.max_row):
wb = openpyxl.Workbook()
sheet = wb.active
print(sheet.title)
sheet.title = '2BDC'
for i in range(0, 215):
    #print(ipinventory.cell(row = i+1,column = 2).value)
    ip_add = ipinventory.cell(row=i + 1, column=2).value
    try:

        arp_entry = ipaddress.ip_address(ip_add)
        #print("Initiating ping on Subnet:",netaddr)

    except ValueError:
        continue
    print(ip_add)

    # for j in ip_net.hosts():
    #     ip_add = str(j)
    #     #print(ip_add)
    #     res = subprocess.call(['ping','-n','1',ip_add],stdout=PIPE)
    #     if res == 0:
    #         #rev_name = socket.gethostbyadd(j)
    try:
        print("Trying to resolve...", ip_add)
        rev_name = socket.gethostbyaddr(ip_add)
Example #43
File: __init__.py Project: zaifei5/redash
def is_private_address(url):
    hostname = urlparse(url).hostname
    ip_address = socket.gethostbyname(hostname)
    return ipaddress.ip_address(text_type(ip_address)).is_private
Example #44
def addressInNetwork(ip, net_n_bits):
    return ipaddress.ip_address(ip) in ipaddress.ip_network(net_n_bits)
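Quick checks for the one-liner above (assuming ``import ipaddress`` in scope; values are illustrative):

    print(addressInNetwork(u'192.168.1.20', u'192.168.1.0/24'))   # True
    print(addressInNetwork(u'10.0.0.5', u'192.168.1.0/24'))       # False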
Example #45
def set_custom_dns_record(qname, rtype, value, action, env):
    # validate qname
    for zone, fn in get_dns_zones(env):
        # It must match a zone apex or be a subdomain of a zone
        # that we are otherwise hosting.
        if qname == zone or qname.endswith("." + zone):
            break
    else:
        # No match.
        if qname != "_secondary_nameserver":
            raise ValueError(
                "%s is not a domain name or a subdomain of a domain name managed by this box."
                % qname)

    # validate rtype
    rtype = rtype.upper()
    if value is not None and qname != "_secondary_nameserver":
        if rtype in ("A", "AAAA"):
            if value != "local":  # "local" is a special flag for us
                v = ipaddress.ip_address(
                    value)  # raises a ValueError if there's a problem
                if rtype == "A" and not isinstance(v, ipaddress.IPv4Address):
                    raise ValueError("That's an IPv6 address.")
                if rtype == "AAAA" and not isinstance(v,
                                                      ipaddress.IPv6Address):
                    raise ValueError("That's an IPv4 address.")
        elif rtype in ("CNAME", "TXT", "SRV", "MX"):
            # anything goes
            pass
        else:
            raise ValueError("Unknown record type '%s'." % rtype)

    # load existing config
    config = list(get_custom_dns_config(env))

    # update
    newconfig = []
    made_change = False
    needs_add = True
    for _qname, _rtype, _value in config:
        if action == "add":
            if (_qname, _rtype, _value) == (qname, rtype, value):
                # Record already exists. Bail.
                return False
        elif action == "set":
            if (_qname, _rtype) == (qname, rtype):
                if _value == value:
                    # Flag that the record already exists, don't
                    # need to add it.
                    needs_add = False
                else:
                    # Drop any other values for this (qname, rtype).
                    made_change = True
                    continue
        elif action == "remove":
            if (_qname, _rtype, _value) == (qname, rtype, value):
                # Drop this record.
                made_change = True
                continue
            if value is None and (_qname, _rtype) == (qname, rtype):
                # Drop all qname-rtype records.
                made_change = True
                continue
        else:
            raise ValueError("Invalid action: " + action)

        # Preserve this record.
        newconfig.append((_qname, _rtype, _value))

    if action in ("add", "set") and needs_add and value is not None:
        newconfig.append((qname, rtype, value))
        made_change = True

    if made_change:
        # serialize & save
        write_custom_dns_config(newconfig, env)
    return made_change
Example #46
0
def _patched_decode_general_name(backend, gn):
    if gn.type == backend._lib.GEN_DNS:
        # Convert to bytes and then decode to utf8. We don't use
        # asn1_string_to_utf8 here because it doesn't properly convert
        # utf8 from ia5strings.
        name_bytes = _asn1_string_to_bytes(backend, gn.d.dNSName)
        try:
            data = name_bytes.decode("utf8")
        except UnicodeDecodeError:
            data = name_bytes.hex()
        # We don't use the constructor for DNSName so we can bypass validation
        # This allows us to create DNSName objects that have unicode chars
        # when a certificate (against the RFC) contains them.
        return x509.DNSName._init_without_validation(data)
    elif gn.type == backend._lib.GEN_URI:
        # Convert to bytes and then decode to utf8. We don't use
        # asn1_string_to_utf8 here because it doesn't properly convert
        # utf8 from ia5strings.
        name_bytes = _asn1_string_to_bytes(backend,
                                           gn.d.uniformResourceIdentifier)
        try:
            data = name_bytes.decode("utf8")
        except UnicodeDecodeError:
            # TODO: we could try utf16-be
            data = name_bytes.hex()
        # We don't use the constructor for URI so we can bypass validation
        # This allows us to create URI objects that have unicode chars
        # when a certificate (against the RFC) contains them.
        return x509.UniformResourceIdentifier._init_without_validation(data)
    elif gn.type == backend._lib.GEN_RID:
        oid = _obj2txt(backend, gn.d.registeredID)
        return x509.RegisteredID(x509.ObjectIdentifier(oid))
    elif gn.type == backend._lib.GEN_IPADD:
        data = _asn1_string_to_bytes(backend, gn.d.iPAddress)
        data_len = len(data)
        if data_len == 8 or data_len == 32:
            # This is an IPv4 or IPv6 Network and not a single IP. This
            # type of data appears in Name Constraints. Unfortunately,
            # ipaddress doesn't support packed bytes + netmask. Additionally,
            # IPv6Network can only handle CIDR rather than the full 16 byte
            # netmask. To handle this we convert the netmask to integer, then
            # find the first 0 bit, which will be the prefix. If another 1
            # bit is present after that the netmask is invalid.
            base = ipaddress.ip_address(data[:data_len // 2])
            netmask = ipaddress.ip_address(data[data_len // 2:])  # noqa: E203
            bits = bin(int(netmask))[2:]
            prefix = bits.find("0")
            # If no 0 bits are found it is a /32 or /128
            if prefix == -1:
                prefix = len(bits)

            if "1" in bits[prefix:]:
                raise ValueError("Invalid netmask")

            ip = ipaddress.ip_network(base.exploded + u"/{0}".format(prefix))
        else:
            try:
                ip = ipaddress.ip_address(data)
            except ValueError:
                ip = data

        return x509.IPAddress(ip)
    elif gn.type == backend._lib.GEN_DIRNAME:
        return x509.DirectoryName(
            _decode_x509_name(backend, gn.d.directoryName))
    elif gn.type == backend._lib.GEN_EMAIL:
        # Convert to bytes and then decode to utf8. We don't use
        # asn1_string_to_utf8 here because it doesn't properly convert
        # utf8 from ia5strings.
        data = _asn1_string_to_bytes(backend, gn.d.rfc822Name).decode("utf8")
        # We don't use the constructor for RFC822Name so we can bypass
        # validation. This allows us to create RFC822Name objects that have
        # unicode chars when a certificate (against the RFC) contains them.
        return x509.RFC822Name._init_without_validation(data)
    elif gn.type == backend._lib.GEN_OTHERNAME:
        type_id = _obj2txt(backend, gn.d.otherName.type_id)
        value = _asn1_to_der(backend, gn.d.otherName.value)
        return x509.OtherName(x509.ObjectIdentifier(type_id), value)
    else:
        # x400Address or ediPartyName
        raise x509.UnsupportedGeneralNameType(
            "{0} is not a supported type".format(
                x509._GENERAL_NAMES.get(gn.type, gn.type)),
            gn.type,
        )
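The netmask-to-prefix conversion in the GEN_IPADD branch above can be exercised on its own; a minimal sketch with a hypothetical helper (not part of the library):

import ipaddress

def packed_network(data):
    # Turn packed bytes (address followed by netmask) into an ip_network.
    # Raises ValueError if the netmask is not contiguous.
    half = len(data) // 2
    base = ipaddress.ip_address(data[:half])
    netmask = ipaddress.ip_address(data[half:])
    bits = bin(int(netmask))[2:]
    prefix = bits.find("0")
    if prefix == -1:  # all ones: a /32 or /128
        prefix = len(bits)
    if "1" in bits[prefix:]:
        raise ValueError("Invalid netmask")
    return ipaddress.ip_network(u"{0}/{1}".format(base.exploded, prefix))

packed_network(bytes([192, 0, 2, 0, 255, 255, 255, 0]))  # IPv4Network('192.0.2.0/24')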
Example #47
0
get_client = p4.Ingress.get_client
clients = [
    {
        "ip": "192.168.63.15",
        "dmac": 0xb883036f4349,
        "port": 60
    },
    {
        "ip": "192.168.63.17",
        "dmac": 0xb883036f4328,
        "port": 44
    },
]
for client in clients:

    get_client.add_with_fwd_to_client(dst_addr=ip_address(client["ip"]),
                                      egress_port=client["port"],
                                      dmac=client["dmac"])

get_server_from_bucket = p4.Ingress.get_server_from_bucket
get_server_from_id = p4.Ingress.get_server_from_id
if True:
    #Multiple servers per machine, machine defined dynamically
    servers = [
        {
            "dmac": 0xb883036f4311,
            "port": 52,
            "sid": 1
        },
        {
            "dmac": 0xb883036f43d1,
Example #48
0
    def __init__(self, server_addr, peer, path, options, stats_callback):
        """
        Class that deals with talking to a single client. Being a subclass of
        `multiprocessing.Process` this will run in a separate process from the
        main process.

        Note:
            Do not use this class as is, inherit from it and override the
            `get_response_data` method which must return a subclass of
            `ResponseData`.

        Args:
            server_addr (tuple): (ip, port) of the server

            peer (tuple): (ip, port of) the peer

            path (string): requested file

            options (dict): a dictionary containing the options the client
                wants to negotiate.

            stats_callback (callable): a callable that will be executed at the
                end of the session. It gets passed an instance of the
                `SessionStats` class.
        """
        self._timeout = int(options['default_timeout'])
        self._server_addr = server_addr
        self._reset_timeout()
        self._retries = int(options['retries'])
        self._block_size = constants.DEFAULT_BLKSIZE
        self._last_block_sent = 0
        self._retransmits = 0
        self._current_block = None
        self._should_stop = False
        self._path = path
        self._options = options
        self._stats_callback = stats_callback
        self._response_data = None
        self._listener = None

        self._peer = peer
        logging.info(
            "New connection from peer `%s` asking for path `%s`" %
            (str(peer), str(path))
        )
        self._family = socket.AF_INET6
        # the format of the peer tuple is different for v4 and v6
        if isinstance(
            ipaddress.ip_address(server_addr[0]), ipaddress.IPv4Address
        ):
            self._family = socket.AF_INET
            # peer address format is different in v4 world
            self._peer = (self._peer[0].replace('::ffff:', ''), self._peer[1])

        self._stats = SessionStats(self._server_addr, self._peer, self._path)

        try:
            self._response_data = self.get_response_data()
        except Exception as e:
            logging.exception("Caught exception: %s." % e)
            self._stats.error = {
                'error_code': constants.ERR_UNDEFINED,
                'error_message': str(e),
            }

        super().__init__()
    async def test_fetch(self):
        response = await fetch()
        ip = response.get('ip')
        self.assertIsInstance(ipaddress.ip_address(ip), ipaddress.IPv4Address)
Example #50
0
def generate_data_set(url):
    global domain_name, registration_length
    data_set = []

    # Converts the given URL into standard format
    if not re.match(r"^https?", url):
        url = "http://" + url

    # Stores the response of the given URL
    try:
        response = requests.get(url)
        soup = BeautifulSoup(response.text, "lxml")
    except:
        response = ""
        soup = -999

    # Extracts domain from the given URL
    # domain = re.findall(r"://([^/]+)/?", url)[0]
    domain = urlparse(url).netloc
    if re.match(r"^www.", domain):
        domain = domain.replace("www.", "")

    # Requests all the information about the domain
    """try:
        whois_response = whois.whois(domain)
    except whois.parser.PywhoisError:
        whois_response = 'none'"""

    rank_checker_response = requests.post(
        "https://www.checkpagerank.net/index.php", {"name": domain})

    # Extracts global rank of the website
    try:
        global_rank = int(
            re.findall(r"Global Rank: ([0-9]+)",
                       rank_checker_response.text)[0])
    except:
        global_rank = -1

    # 1.having_IP_Address
    try:
        ipaddress.ip_address(url)
        data_set.append(-1)
    except:
        data_set.append(1)

    # 2.URL_Length
    if len(url) < 54:
        data_set.append(1)
    elif len(url) >= 54 and len(url) <= 75:
        data_set.append(0)
    else:
        data_set.append(-1)

    # 3.Shortining_Service
    match = re.search(
        'bit\.ly|goo\.gl|shorte\.st|go2l\.ink|x\.co|ow\.ly|t\.co|tinyurl|tr\.im|is\.gd|cli\.gs|'
        'yfrog\.com|migre\.me|ff\.im|tiny\.cc|url4\.eu|twit\.ac|su\.pr|twurl\.nl|snipurl\.com|'
        'short\.to|BudURL\.com|ping\.fm|post\.ly|Just\.as|bkite\.com|snipr\.com|fic\.kr|loopt\.us|'
        'doiop\.com|short\.ie|kl\.am|wp\.me|rubyurl\.com|om\.ly|to\.ly|bit\.do|t\.co|lnkd\.in|'
        'db\.tt|qr\.ae|adf\.ly|goo\.gl|bitly\.com|cur\.lv|tinyurl\.com|ow\.ly|bit\.ly|ity\.im|'
        'q\.gs|is\.gd|po\.st|bc\.vc|twitthis\.com|u\.to|j\.mp|buzurl\.com|cutt\.us|u\.bb|yourls\.org|'
        'x\.co|prettylinkpro\.com|scrnch\.me|filoops\.info|vzturl\.com|qr\.net|1url\.com|tweez\.me|v\.gd|tr\.im|link\.zip\.net',
        url)
    if match:
        data_set.append(-1)
    else:
        data_set.append(1)

    # 4.having_At_Symbol
    if re.findall("@", url):
        data_set.append(-1)
    else:
        data_set.append(1)

    # 5.double_slash_redirecting
    list = [x.start(0) for x in re.finditer('//', url)]
    if list[len(list) - 1] > 6:
        data_set.append(-1)
    else:
        data_set.append(1)

    # 6.Prefix_Suffix
    if re.findall(r"https?://[^\-]+-[^\-]+/", url):
        data_set.append(-1)
    else:
        data_set.append(1)

    # 7.having_Sub_Domain
    if len(re.findall("\.", url)) == 1:
        data_set.append(1)
    elif len(re.findall("\.", url)) == 2:
        data_set.append(0)
    else:
        data_set.append(-1)

    # 8.SSLfinal_State
    try:
        if response.text:
            data_set.append(1)
    except:
        data_set.append(-1)

    # 9.Domain_registeration_length

    dns = 0
    try:
        domain_name = whois.whois(domain)
    except:
        dns = 1

    if dns == 1:
        data_set.append(1)  # phishing
    else:
        expiration_date = domain_name.expiration_date
        today = time.strftime('%Y-%m-%d')
        today = datetime.strptime(today, '%Y-%m-%d')
        if expiration_date is None:
            data_set.append(-1)
        elif type(expiration_date) is list or type(today) is list:
            data_set.append(
                0
            )  # If it is a list then we can't select a single value from it, so the site is regarded as suspicious
        else:
            creation_date = domain_name.creation_date
            expiration_date = domain_name.expiration_date
            if (isinstance(creation_date, str)
                    or isinstance(expiration_date, str)):
                try:
                    creation_date = datetime.strptime(creation_date,
                                                      '%Y-%m-%d')
                    expiration_date = datetime.strptime(
                        expiration_date, "%Y-%m-%d")
                except:
                    data_set.append(0)
            try:
                registration_length = abs((expiration_date[0] - today).days)
            except:
                registration_length = abs((expiration_date - today).days)
            if registration_length / 365 <= 1:
                data_set.append(-1)  # phishing
            else:
                data_set.append(1)  # legitimate

    # 10.Favicon
    if soup == -999:
        data_set.append(-1)
    else:
        try:
            for head in soup.find_all('head'):
                for head.link in soup.find_all('link', href=True):
                    dots = [
                        x.start(0)
                        for x in re.finditer('\.', head.link['href'])
                    ]
                    if url in head.link['href'] or len(
                            dots) == 1 or domain in head.link['href']:
                        data_set.append(1)
                        raise StopIteration
                    else:
                        data_set.append(-1)
                        raise StopIteration
        except StopIteration:
            pass

    # 11. port
    try:
        port = domain.split(":")[1]
        if port:
            data_set.append(-1)
        else:
            data_set.append(1)
    except:
        data_set.append(1)

    # 12. HTTPS_token
    if re.findall(r"^https://", url):
        data_set.append(1)
    else:
        data_set.append(-1)

    # 13. Request_URL
    i = 0
    success = 0
    if soup == -999:
        data_set.append(-1)
    else:
        for img in soup.find_all('img', src=True):
            dots = [x.start(0) for x in re.finditer('\.', img['src'])]
            if url in img['src'] or domain in img['src'] or len(dots) == 1:
                success = success + 1
            i = i + 1

        for audio in soup.find_all('audio', src=True):
            dots = [x.start(0) for x in re.finditer('\.', audio['src'])]
            if url in audio['src'] or domain in audio['src'] or len(dots) == 1:
                success = success + 1
            i = i + 1

        for embed in soup.find_all('embed', src=True):
            dots = [x.start(0) for x in re.finditer('\.', embed['src'])]
            if url in embed['src'] or domain in embed['src'] or len(dots) == 1:
                success = success + 1
            i = i + 1

        for iframe in soup.find_all('iframe', src=True):
            dots = [x.start(0) for x in re.finditer('\.', iframe['src'])]
            if url in iframe['src'] or domain in iframe['src'] or len(
                    dots) == 1:
                success = success + 1
            i = i + 1

        try:
            percentage = success / float(i) * 100
            if percentage < 22.0:
                data_set.append(1)
            elif ((percentage >= 22.0) and (percentage < 61.0)):
                data_set.append(0)
            else:
                data_set.append(-1)
        except:
            data_set.append(1)

    # 14. URL_of_Anchor
    percentage = 0
    i = 0
    unsafe = 0
    if soup == -999:
        data_set.append(-1)
    else:
        for a in soup.find_all('a', href=True):
            # 2nd condition was 'JavaScript ::void(0)' but we put JavaScript because the space between javascript and :: might not be
            # there in the actual a['href']
            if "#" in a['href'] or "javascript" in a['href'].lower(
            ) or "mailto" in a['href'].lower() or not (url in a['href']
                                                       or domain in a['href']):
                unsafe = unsafe + 1
            i = i + 1

        try:
            percentage = unsafe / float(i) * 100
        except:
            data_set.append(1)

        if percentage < 31.0:
            data_set.append(1)
        elif ((percentage >= 31.0) and (percentage < 67.0)):
            data_set.append(0)
        else:
            data_set.append(-1)

    # 15. Links_in_tags
    i = 0
    success = 0
    if soup == -999:
        data_set.append(-1)
    else:
        for link in soup.find_all('link', href=True):
            dots = [x.start(0) for x in re.finditer('\.', link['href'])]
            if url in link['href'] or domain in link['href'] or len(dots) == 1:
                success = success + 1
            i = i + 1

        for script in soup.find_all('script', src=True):
            dots = [x.start(0) for x in re.finditer('\.', script['src'])]
            if url in script['src'] or domain in script['src'] or len(
                    dots) == 1:
                success = success + 1
            i = i + 1
        try:
            percentage = success / float(i) * 100
        except:
            data_set.append(1)

        if percentage < 17.0:
            data_set.append(1)
        elif ((percentage >= 17.0) and (percentage < 81.0)):
            data_set.append(0)
        else:
            data_set.append(-1)

        # 16. SFH
        for form in soup.find_all('form', action=True):
            if form['action'] == "" or form['action'] == "about:blank":
                data_set.append(-1)
                break
            elif url not in form['action'] and domain not in form['action']:
                data_set.append(0)
                break
            else:
                data_set.append(1)
                break

    # 17. Submitting_to_email
    if response == "":
        data_set.append(-1)
    else:
        if re.findall(r"[mail\(\)|mailto:?]", response.text):
            data_set.append(1)
        else:
            data_set.append(-1)

    # 18. Abnormal_URL
    if response == "":
        data_set.append(-1)
    else:
        if response.text == "":
            data_set.append(1)
        else:
            data_set.append(-1)

    # 19. Redirect
    if response == "":
        data_set.append(-1)
    else:
        if len(response.history) <= 1:
            data_set.append(-1)
        elif len(response.history) <= 4:
            data_set.append(0)
        else:
            data_set.append(1)

    # 20. on_mouseover
    if response == "":
        data_set.append(-1)
    else:
        if re.findall("<script>.+onmouseover.+</script>", response.text):
            data_set.append(1)
        else:
            data_set.append(-1)

    # 21. RightClick
    if response == "":
        data_set.append(-1)
    else:
        if re.findall(r"event.button ?== ?2", response.text):
            data_set.append(1)
        else:
            data_set.append(-1)

    # 22. popUpWidnow
    if response == "":
        data_set.append(-1)
    else:
        if re.findall(r"alert\(", response.text):
            data_set.append(1)
        else:
            data_set.append(-1)

    # 23. Iframe
    if response == "":
        data_set.append(-1)
    else:
        if re.findall(r"[<iframe>|<frameBorder>]", response.text):
            data_set.append(1)
        else:
            data_set.append(-1)

    # 24. age_of_domain
    if response == "":
        data_set.append(-1)
    else:
        try:
            registration_date = \
            re.findall(r'Registration Date:</div><div class="df-value">([^<]+)</div>', domain_name.text)[0]
            if diff_month(date.today(), date_parse(registration_date)) >= 6:
                data_set.append(-1)
            else:
                data_set.append(1)
        except:
            data_set.append(1)

    # 25. DNSRecord
    dns = 1
    try:
        d = whois.whois(domain)
    except:
        dns = -1
    if dns == -1:
        data_set.append(-1)
    else:
        data_set.append(1)

    # 26. web_traffic
    try:
        rank = BeautifulSoup(
            urllib.request.urlopen(
                "http://data.alexa.com/data?cli=10&dat=s&url=" + url).read(),
            "xml").find("REACH")['RANK']
        rank = int(rank)
        if (rank < 100000):
            data_set.append(1)
        else:
            data_set.append(0)
    except TypeError:
        data_set.append(-1)

    # 27. Page_Rank
    try:
        if global_rank > 0 and global_rank < 100000:
            data_set.append(-1)
        else:
            data_set.append(1)
    except:
        data_set.append(1)

    # 28. Google_Index
    site = search(url, 5)
    if site:
        data_set.append(1)
    else:
        data_set.append(-1)

    # 29. Links_pointing_to_page
    if response == "":
        data_set.append(-1)
    else:
        number_of_links = len(re.findall(r"<a href=", response.text))
        if number_of_links == 0:
            data_set.append(1)
        elif number_of_links <= 2:
            data_set.append(0)
        else:
            data_set.append(-1)

    # 30. Statistical_report
    url_match = re.search(
        'at\.ua|usa\.cc|baltazarpresentes\.com\.br|pe\.hu|esy\.es|hol\.es|sweddy\.com|myjino\.ru|96\.lt|ow\.ly',
        url)
    try:
        ip_address = socket.gethostbyname(domain)
        ip_match = re.search(
            '146\.112\.61\.108|213\.174\.157\.151|121\.50\.168\.88|192\.185\.217\.116|78\.46\.211\.158|181\.174\.165\.13|46\.242\.145\.103|121\.50\.168\.40|83\.125\.22\.219|46\.242\.145\.98|'
            '107\.151\.148\.44|107\.151\.148\.107|64\.70\.19\.203|199\.184\.144\.27|107\.151\.148\.108|107\.151\.148\.109|119\.28\.52\.61|54\.83\.43\.69|52\.69\.166\.231|216\.58\.192\.225|'
            '118\.184\.25\.86|67\.208\.74\.71|23\.253\.126\.58|104\.239\.157\.210|175\.126\.123\.219|141\.8\.224\.221|10\.10\.10\.10|43\.229\.108\.32|103\.232\.215\.140|69\.172\.201\.153|'
            '216\.218\.185\.162|54\.225\.104\.146|103\.243\.24\.98|199\.59\.243\.120|31\.170\.160\.61|213\.19\.128\.77|62\.113\.226\.131|208\.100\.26\.234|195\.16\.127\.102|195\.16\.127\.157|'
            '34\.196\.13\.28|103\.224\.212\.222|172\.217\.4\.225|54\.72\.9\.51|192\.64\.147\.141|198\.200\.56\.183|23\.253\.164\.103|52\.48\.191\.26|52\.214\.197\.72|87\.98\.255\.18|209\.99\.17\.27|'
            '216\.38\.62\.18|104\.130\.124\.96|47\.89\.58\.141|78\.46\.211\.158|54\.86\.225\.156|54\.82\.156\.19|37\.157\.192\.102|204\.11\.56\.48|110\.34\.231\.42',
            ip_address)
        if url_match:
            data_set.append(-1)
        elif ip_match:
            data_set.append(-1)
        else:
            data_set.append(1)
    except:
        pass

    return data_set
Example #51
0
    def _deserialize(self, value, attr, data):
        general_names = []
        for name in value:
            if name['nameType'] == 'DNSName':
                validators.sensitive_domain(name['value'])
                general_names.append(x509.DNSName(name['value']))

            elif name['nameType'] == 'IPAddress':
                general_names.append(
                    x509.IPAddress(ipaddress.ip_address(name['value'])))

            elif name['nameType'] == 'IPNetwork':
                general_names.append(
                    x509.IPAddress(ipaddress.ip_network(name['value'])))

            elif name['nameType'] == 'uniformResourceIdentifier':
                general_names.append(
                    x509.UniformResourceIdentifier(name['value']))

            elif name['nameType'] == 'directoryName':
                # TODO: Need to parse a string in name['value'] like:
                # 'CN=Common Name, O=Org Name, OU=OrgUnit Name, C=US, ST=ST, L=City/[email protected]'
                # or
                # 'CN=Common Name/O=Org Name/OU=OrgUnit Name/C=US/ST=NH/L=City/[email protected]'
                # and turn it into something like:
                # x509.Name([
                #     x509.NameAttribute(x509.OID_COMMON_NAME, "Common Name"),
                #     x509.NameAttribute(x509.OID_ORGANIZATION_NAME, "Org Name"),
                #     x509.NameAttribute(x509.OID_ORGANIZATIONAL_UNIT_NAME, "OrgUnit Name"),
                #     x509.NameAttribute(x509.OID_COUNTRY_NAME, "US"),
                #     x509.NameAttribute(x509.OID_STATE_OR_PROVINCE_NAME, "NH"),
                #     x509.NameAttribute(x509.OID_LOCALITY_NAME, "City"),
                #     x509.NameAttribute(x509.OID_EMAIL_ADDRESS, "*****@*****.**")
                # ]
                # general_names.append(x509.DirectoryName(x509.Name(BLAH))))
                pass

            elif name['nameType'] == 'rfc822Name':
                general_names.append(x509.RFC822Name(name['value']))

            elif name['nameType'] == 'registeredID':
                general_names.append(
                    x509.RegisteredID(x509.ObjectIdentifier(name['value'])))

            elif name['nameType'] == 'otherName':
                # This has two inputs (type and value), so it doesn't fit the mold of the rest of these GeneralName entities.
                # general_names.append(x509.OtherName(name['type'], bytes(name['value']), 'utf-8'))
                pass

            elif name['nameType'] == 'x400Address':
                # The Python Cryptography library doesn't support x400Address types (yet?)
                pass

            elif name['nameType'] == 'EDIPartyName':
                # The Python Cryptography library doesn't support EDIPartyName types (yet?)
                pass

            else:
                current_app.logger.warning(
                    'Unable to deserialize SubAltName with type: {name_type}'.
                    format(name_type=name['nameType']))

        return x509.SubjectAlternativeName(general_names)
Example #52
0
File: ipnetwork.py Project: qmiinh/openr
def ip_version(addr):
    """return ip addr version"""

    return ipaddress.ip_address(addr).version
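For instance, two hypothetical calls:

ip_version("192.0.2.1")    # 4
ip_version("2001:db8::1")  # 6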
    def __init__(self, ip):
        self._ip = int(ipaddress.ip_address(ip))
Example #54
0
    def _check_ip(self, ip):
        """Here all the workload happens. Read the files, check if the ip is in there and report the results.
        If the lock file is found, which gets created when lists are getting updated, the script starts to sleep 10
        seconds before checking again. Also reads the source file date and checks, if its too old (ignoreolderthandays
        parameter).

        :param ip: IP to search for.
        :type ip: str
        :returns: List of hits containing dictionaries.
        :rtype: list
        """

        # hits will be the variable to store all matches
        hits = []
        description = {}
        file_date = {}
        # Check for lock
        while os.path.isfile('{}/.lock'.format(self.path)):
            sleep(10)

        # First: check the ipsets
        for ipset in self.ipsets:
            with open('{}/{}'.format(self.path, ipset)) as afile:
                ipsetname = ipset.split('.')[0]
                description.update({ipsetname: ''})
                file_date.update({ipsetname: ''})
                for l in afile:
                    if l[0] == '#':
                        # Check for date and break if too old
                        if '# Source File Date: ' in l:
                            datestr = re.sub('# Source File Date: ', '',
                                             l.rstrip('\n'))
                            date = parse(datestr)
                            file_date[ipsetname] = str(date)
                            if (self.now -
                                    date).days > self.ignoreolderthandays:
                                break
                        description[ipsetname] += re.sub(r'^\[.*\] \(.*\) [a-zA-Z0-9.\- ]*$', '', l.lstrip('# '))\
                            .replace('\n\n', '\n')
                    else:
                        if ip in l:
                            # On match append to hits and break; next file!
                            hits.append({
                                'list':
                                ipsetname,
                                'description':
                                description.get(ipsetname),
                                'file_date':
                                file_date.get(ipsetname)
                            })
                            break

        # Second: check the netsets
        for netset in self.netsets:
            with open('{}/{}'.format(self.path, netset)) as afile:
                netsetname = netset.split('.')[0]
                description.update({netsetname: ''})
                file_date.update({netsetname: ''})
                for l in afile:
                    if l[0] == '#':
                        # Check for date and break if too old
                        if '# Source File Date: ' in l:
                            datestr = re.sub('# Source File Date: ', '',
                                             l.rstrip('\n'))
                            date = parse(datestr)
                            file_date[netsetname] = str(date)
                            if (self.now -
                                    date).days > self.ignoreolderthandays:
                                break
                        description[netsetname] += re.sub(r'^\[.*\] \(.*\) [a-zA-Z0-9.\- ]*$', '', l.lstrip('# '))\
                            .replace('\n\n', '\n')
                    else:
                        try:
                            if ipaddress.ip_address(
                                    ip) in ipaddress.ip_network(u'{}'.format(
                                        l.split('\n')[0])):
                                hits.append({
                                    'list':
                                    netsetname,
                                    'description':
                                    description.get(netsetname),
                                    'file_date':
                                    file_date.get(netsetname)
                                })
                                break
                        except ValueError as e:
                            self.error(
                                'ValueError occurred. Used values: ip network {}, ip to check {}, file {}. '
                                'Error message: {}'.format(l, ip, netset, e))

        return hits
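The age check in both loops boils down to parsing the "# Source File Date:" header line and comparing it against ignoreolderthandays; a standalone sketch with hypothetical values:

from datetime import datetime
from dateutil.parser import parse

line = "# Source File Date: 2020-01-07"
datestr = line.replace("# Source File Date: ", "")
date = parse(datestr)
too_old = (datetime.now() - date).days > 30  # e.g. ignoreolderthandays = 30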
Example #55
0
def validate_ip(ctx, param, value):
    try:
        ipaddress.ip_address(value)
        return value
    except ValueError as ex:
        raise click.BadParameter("Invalid IP: %s" % ex)
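A sketch of how such a callback is typically attached to a click option (the command name and option are hypothetical):

import click

@click.command()
@click.option("--bind", callback=validate_ip, default="127.0.0.1",
              help="IP address to listen on.")
def serve(bind):
    click.echo("Listening on {}".format(bind))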
Example #56
0
File: network.py Project: jcgoette/core
def get_url(
    hass: HomeAssistant,
    *,
    require_current_request: bool = False,
    require_ssl: bool = False,
    require_standard_port: bool = False,
    allow_internal: bool = True,
    allow_external: bool = True,
    allow_cloud: bool = True,
    allow_ip: bool | None = None,
    prefer_external: bool | None = None,
    prefer_cloud: bool = False,
) -> str:
    """Get a URL to this instance."""
    if require_current_request and http.current_request.get() is None:
        raise NoURLAvailableError

    if prefer_external is None:
        prefer_external = hass.config.api is not None and hass.config.api.use_ssl

    if allow_ip is None:
        allow_ip = hass.config.api is None or not hass.config.api.use_ssl

    order = [TYPE_URL_INTERNAL, TYPE_URL_EXTERNAL]
    if prefer_external:
        order.reverse()

    # Try finding a URL in the order specified
    for url_type in order:

        if allow_internal and url_type == TYPE_URL_INTERNAL:
            with suppress(NoURLAvailableError):
                return _get_internal_url(
                    hass,
                    allow_ip=allow_ip,
                    require_current_request=require_current_request,
                    require_ssl=require_ssl,
                    require_standard_port=require_standard_port,
                )

        if allow_external and url_type == TYPE_URL_EXTERNAL:
            with suppress(NoURLAvailableError):
                return _get_external_url(
                    hass,
                    allow_cloud=allow_cloud,
                    allow_ip=allow_ip,
                    prefer_cloud=prefer_cloud,
                    require_current_request=require_current_request,
                    require_ssl=require_ssl,
                    require_standard_port=require_standard_port,
                )

    # For current request, we accept loopback interfaces (e.g., 127.0.0.1),
    # the Supervisor hostname and localhost transparently
    request_host = _get_request_host()
    if (
        require_current_request
        and request_host is not None
        and hass.config.api is not None
    ):
        scheme = "https" if hass.config.api.use_ssl else "http"
        current_url = yarl.URL.build(
            scheme=scheme, host=request_host, port=hass.config.api.port
        )

        known_hostnames = ["localhost"]
        if hass.components.hassio.is_hassio():
            host_info = hass.components.hassio.get_host_info()
            known_hostnames.extend(
                [host_info["hostname"], f"{host_info['hostname']}.local"]
            )

        if (
            (
                (
                    allow_ip
                    and is_ip_address(request_host)
                    and is_loopback(ip_address(request_host))
                )
                or request_host in known_hostnames
            )
            and (not require_ssl or current_url.scheme == "https")
            and (not require_standard_port or current_url.is_default_port())
        ):
            return normalize_url(str(current_url))

    # We have to be honest now, we have no viable option available
    raise NoURLAvailableError
Example #57
0
File: vagrant.py Project: zhulh200868/salt
def get_ssh_config(name, network_mask='', get_private_key=False):
    r'''
    Retrieve hints of how you might connect to a Vagrant VM.

    :param name: the salt_id of the machine
    :param network_mask: a CIDR mask to search for the VM's address
    :param get_private_key: (default: False) return the key used for ssh login
    :return: a dict of ssh login information for the VM

    CLI Example:

    .. code-block:: bash

        salt <host> vagrant.get_ssh_config <salt_id>
        salt my_laptop vagrant.get_ssh_config quail1 network_mask=10.0.0.0/8 get_private_key=True

    The returned dictionary contains:

    - key_filename:  the name of the private key file on the VM host computer
    - ssh_username:  the username to be used to log in to the VM
    - ssh_host:  the IP address used to log in to the VM.  (This will usually be `127.0.0.1`)
    - ssh_port:  the TCP port used to log in to the VM.  (This will often be `2222`)
    - \[ip_address:\]  (if `network_mask` is defined. see below)
    - \[private_key:\]  (if `get_private_key` is True) the private key for ssh_username

    About `network_mask`:

    Vagrant usually uses a redirected TCP port on its host computer to log in to a VM using ssh.
    This redirected port and its IP address are "ssh_port" and "ssh_host".  The ssh_host is
    usually the localhost (127.0.0.1).
    This makes it impossible for a third machine (such as a salt-cloud master) to contact the VM
    unless the VM has another network interface defined.  You will usually want a bridged network
    defined by having a `config.vm.network "public_network"` statement in your `Vagrantfile`.

    The IP address of the bridged adapter will typically be assigned by DHCP and unknown to you,
    but you should be able to determine what IP network the address will be chosen from.
    If you enter a CIDR network mask, Salt will attempt to find the VM's address for you.
    The host machine will send an "ifconfig" command to the VM (using ssh to `ssh_host`:`ssh_port`)
    and return the IP address of the first interface it can find which matches your mask.
    '''
    vm_ = get_vm_info(name)

    ssh_config = _vagrant_ssh_config(vm_)

    try:
        ans = {
            'key_filename': ssh_config['IdentityFile'],
            'ssh_username': ssh_config['User'],
            'ssh_host': ssh_config['HostName'],
            'ssh_port': ssh_config['Port'],
        }

    except KeyError:
        raise CommandExecutionError(
            'Insufficient SSH information to contact VM {}. '
            'Is it running?'.format(vm_.get('machine', '(default)')))

    if network_mask:
        #  ask the new VM to report its network address
        command = 'ssh -i {IdentityFile} -p {Port} ' \
                  '-oStrictHostKeyChecking={StrictHostKeyChecking} ' \
                  '-oUserKnownHostsFile={UserKnownHostsFile} ' \
                  '-oControlPath=none ' \
                  '{User}@{HostName} ifconfig'.format(**ssh_config)

        log.info('Trying ssh -p {Port} {User}@{HostName} ifconfig'.format(
            **ssh_config))
        reply = __salt__['cmd.shell'](command)
        log.info('--->\n' + reply)
        target_network_range = ipaddress.ip_network(network_mask, strict=False)

        for line in reply.split('\n'):
            try:  # try to find a bridged network address
                # the lines we are looking for appear like:
                #    "inet addr:10.124.31.185  Bcast:10.124.31.255  Mask:255.255.248.0"
                # or "inet6 addr: fe80::a00:27ff:fe04:7aac/64 Scope:Link"
                tokens = line.replace(
                    'addr:', '',
                    1).split()  # remove "addr:" if it exists, then split
                found_address = None
                if "inet" in tokens:
                    nxt = tokens.index("inet") + 1
                    found_address = ipaddress.ip_address(tokens[nxt])
                elif "inet6" in tokens:
                    nxt = tokens.index("inet6") + 1
                    found_address = ipaddress.ip_address(
                        tokens[nxt].split('/')[0])
                if found_address in target_network_range:
                    ans['ip_address'] = six.text_type(found_address)
                    break  # we have located a good matching address
            except (IndexError, AttributeError, TypeError):
                pass  # all syntax and type errors loop here
                # falling out of the loop leaves us remembering the last candidate
        log.info('Network IP address in %s detected as: %s',
                 target_network_range, ans.get('ip_address', '(not found)'))

    if get_private_key:
        # retrieve the Vagrant private key from the host
        try:
            with salt.utils.files.fopen(ssh_config['IdentityFile']) as pks:
                ans['private_key'] = salt.utils.stringutils.to_unicode(
                    pks.read())
        except (OSError, IOError) as e:
            raise CommandExecutionError(
                "Error processing Vagrant private key file: {}".format(e))
    return ans
Example #58
0
    def attach(self,
               ue_id,
               attach_type,
               resp_type,
               resp_msg_type,
               sec_ctxt=s1ap_types.TFW_CREATE_NEW_SECURITY_CONTEXT,
               id_type=s1ap_types.TFW_MID_TYPE_IMSI,
               eps_type=s1ap_types.TFW_EPS_ATTACH_TYPE_EPS_ATTACH):
        """
        Given a UE issue the attach request of specified type

        Caches the assigned IP address, if any is assigned

        Args:
            ue_id: The eNB ue_id
            attach_type: The type of attach e.g. UE_END_TO_END_ATTACH_REQUEST
            resp_type: enum type of the expected response
            sec_ctxt: Optional param allows for the reuse of the security
                context, defaults to creating a new security context.
            id_type: Optional param allows for changing up the ID type,
                defaults to s1ap_types.TFW_MID_TYPE_IMSI.
            eps_type: Optional param allows for variation in the EPS attach
                type, defaults to s1ap_types.TFW_EPS_ATTACH_TYPE_EPS_ATTACH.
        """
        attach_req = s1ap_types.ueAttachRequest_t()
        attach_req.ue_Id = ue_id
        attach_req.mIdType = id_type
        attach_req.epsAttachType = eps_type
        attach_req.useOldSecCtxt = sec_ctxt

        assert (self.issue_cmd(attach_type, attach_req) == 0)
        response = self.get_response()

        # The MME actually sends INT_CTX_SETUP_IND and UE_ATTACH_ACCEPT_IND in
        # one message, but the s1aptester splits it and sends the test two
        # messages. Usually context setup comes before attach accept, but
        # it's possible it may happen the other way.
        if s1ap_types.tfwCmd.INT_CTX_SETUP_IND.value == response.msg_type:
            response = self.get_response()
        elif s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND.value == response.msg_type:
            context_setup = self.get_response()
            assert (context_setup.msg_type ==
                    s1ap_types.tfwCmd.INT_CTX_SETUP_IND.value)

        logging.debug('s1ap response expected, received: %d, %d',
                      resp_type.value, response.msg_type)
        assert (resp_type.value == response.msg_type)

        msg = response.cast(resp_msg_type)

        # We only support IPv4 right now, as max PDN address in S1AP tester is
        # currently 13 bytes, which is too short for IPv6 (which requires 16)
        if resp_msg_type == s1ap_types.ueAttachAccept_t:
            pdn_type = msg.esmInfo.pAddr.pdnType
            addr = msg.esmInfo.pAddr.addrInfo
            if S1ApUtil.CM_ESM_PDN_IPV4 == pdn_type:
                # Cast and cache the IPv4 address
                ip = ipaddress.ip_address(bytes(addr[:4]))
                with self._lock:
                    self._ue_ip_map[ue_id] = ip
            else:
                raise ValueError('PDN TYPE %s not supported' % pdn_type)
        return msg
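Note that ipaddress.ip_address accepts packed bytes as well as strings, which is what the IPv4 caching above relies on; for example:

ipaddress.ip_address(bytes([192, 168, 0, 1]))  # IPv4Address('192.168.0.1')
ipaddress.ip_address(b"\x0a\x00\x00\x01")      # IPv4Address('10.0.0.1')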
Example #59
0
    def ip_address(self):
        '''The host as a python ip_address object, or None.'''
        try:
            return ip_address(self.host)
        except ValueError:
            return None
Example #60
0
import json
import urllib
import urllib2
import ipaddress

import time

start_time = time.time()

init_ip = u'192.168.1.1'

post_params = {'req': 'get_ip'}

url = 'http://localhost:8888'
params = urllib.urlencode(post_params)
ip = ipaddress.ip_address(init_ip)
i = 0
while i < 253:
    #print str(ip)
    i = i + 1
    url = u'http://' + str(ip) + u':8888'
    ip = ip + 1
    print url
    try:
        response = urllib2.urlopen(url, params, timeout=0.1)
        json_response = json.load(response)
        if (json_response.has_key('status')
                and json_response.get('status') == 0):
            print '**************' + json_response.get('ret')
            print time.time() - start_time
            exit(0)
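Instead of incrementing an ip_address by hand, the same sweep can be expressed with ip_network(...).hosts(); a Python 3 sketch, not part of the script above:

import ipaddress

for host in ipaddress.ip_network(u"192.168.1.0/24").hosts():
    url = "http://{}:8888".format(host)
    print(url)  # issue the probe request against this URL here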