Example #1
def fromfd(fd, keep_fd=True):
    """Create a socket from a file descriptor

    socket domain (family), type and protocol are auto-detected. By default
    the socket uses a dup()ed fd. The original fd can be closed.

    The parameter `keep_fd` controls what happens to the input fd. Under
    Python 2 the fd is always duplicated; with `keep_fd=False` the input fd
    is closed afterwards. Under Python 3 with `keep_fd=False`, the new
    socket object takes over the input fd directly (no dup()).

    :param fd: socket fd
    :type fd: int
    :param keep_fd: keep input fd
    :type keep_fd: bool
    :return: socket.socket instance
    :raises OSError: for invalid socket fd
    """
    family = _raw_getsockopt(fd, socket.SOL_SOCKET, SO_DOMAIN)
    typ = _raw_getsockopt(fd, socket.SOL_SOCKET, SO_TYPE)
    proto = _raw_getsockopt(fd, socket.SOL_SOCKET, SO_PROTOCOL)
    if sys.version_info.major == 2:
        # Python 2 has no fileno argument and always duplicates the fd
        sockobj = socket.fromfd(fd, family, typ, proto)
        sock = socket.socket(None, None, None, _sock=sockobj)
        if not keep_fd:
            os.close(fd)
        return sock
    else:
        if keep_fd:
            return socket.fromfd(fd, family, typ, proto)
        else:
            return socket.socket(family, typ, proto, fileno=fd)
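A minimal usage sketch for the helper above (hedged; assumes `fromfd` from this module is importable and the kernel exposes SO_DOMAIN/SO_PROTOCOL): wrap an already-bound listener purely from its file descriptor and check that the clone sees the same local address.

import socket

# hedged sketch: family/type/protocol are auto-detected from the fd itself
orig = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
orig.bind(('127.0.0.1', 0))
orig.listen(1)

clone = fromfd(orig.fileno())               # default keep_fd=True dup()s the fd
assert clone.family == socket.AF_INET
assert clone.getsockname() == orig.getsockname()
orig.close()                                # safe: clone owns its own descriptor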
Example #2
    def _get_systemd_socket(self, address):
        fds = sd.listen_fds()
        if not fds:
            return address
        elif len(fds) > 1:
            raise ValueError('Too many listening sockets', fds)

        if isinstance(address, tuple):
            port = address[1]
            # systemd uses IPv6
            if not sd.is_socket_inet(fds[0], family=socket.AF_INET6,
                                     type=socket.SOCK_STREAM,
                                     listening=True, port=port):
                raise ValueError("FD {} is not TCP IPv6 socket on port {}",
                                 fds[0], port)
            logger.info('Using systemd socket activation on port %i', port)
            sock = socket.fromfd(fds[0], socket.AF_INET6, socket.SOCK_STREAM)
        else:
            if not sd.is_socket_unix(fds[0], socket.SOCK_STREAM,
                                     listening=True, path=address):
                raise ValueError("FD {} is not Unix stream socket on path {}",
                                 fds[0], address)
            logger.info('Using systemd socket activation on path %s', address)
            sock = socket.fromfd(fds[0], socket.AF_UNIX, socket.SOCK_STREAM)

        if sys.version_info[0] < 3:
            # Python 2.7's socket.fromfd() returns _socket.socket
            sock = socket.socket(_sock=sock)
        return sock
Example #3
 def closesocketfunction(curlfd):
     called['called'] = True
     # Unix only
     #os.close(curlfd)
     # Unix & Windows
     socket.fromfd(curlfd, socket.AF_INET, socket.SOCK_STREAM).close()
     return 0
 def socketpair():
     s1, s2 = eunuchs.socketpair.socketpair()
     p, c = (socket.fromfd(s1, socket.AF_UNIX, socket.SOCK_STREAM),
             socket.fromfd(s2, socket.AF_UNIX, socket.SOCK_STREAM))
     os.close(s1)
     os.close(s2)
     return p, c
Example #5
 def __init__(self, thread_index):
     protocol_str = os.getenv('CLOUDI_API_INIT_PROTOCOL')
     if protocol_str is None:
         raise invalid_input_exception()
     buffer_size_str = os.getenv('CLOUDI_API_INIT_BUFFER_SIZE')
     if buffer_size_str is None:
         raise invalid_input_exception()
     if protocol_str == 'tcp':
         self.__s = socket.fromfd(
             thread_index + 3, socket.AF_INET, socket.SOCK_STREAM
         )
         self.__use_header = True
     elif protocol_str == 'udp':
         self.__s = socket.fromfd(
             thread_index + 3, socket.AF_INET, socket.SOCK_DGRAM
         )
         self.__use_header = False
     elif protocol_str == 'local':
         self.__s = socket.fromfd(
             thread_index + 3, socket.AF_UNIX, socket.SOCK_STREAM
         )
         self.__use_header = True
     else:
         raise invalid_input_exception()
     self.__initializtion_complete = False
     self.__size = int(buffer_size_str)
     self.__callbacks = {}
     self.__send(term_to_binary(OtpErlangAtom('init')))
     (self.__prefix,
      self.__timeout_async, self.__timeout_sync,
      self.__priority_default,
      self.__request_timeout_adjustment) = self.__poll_request(False)
Example #6
 def accept(self):
     logging.debug('connection accepted')
     conn = self.listener.nextPendingConnection()
     if hasattr(socket, 'fromfd'):
         sock = socket.fromfd(conn.socketDescriptor(),
                              socket.AF_INET, socket.SOCK_STREAM)
         sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 2**22)
     self.conns.append(conn)
     conn.readyRead.connect(lambda: self.read_from(conn))
Example #7
def hook(uds_datap):
  outer_sock = socket.fromfd(uds_datap.contents.outer_sock, socket.AF_INET,
                              socket.SOCK_STREAM, 0)
  inner_sock = socket.fromfd(uds_datap.contents.inner_sock, socket.AF_INET,
                              socket.SOCK_STREAM, 0)
  custom_hook(outer_sock, inner_sock)
  hookffi.teardown(uds_datap)
  return 0
Example #8
    def test_fromfd(self):
        msg = 'hello world'
        x, y = socket.socketpair()
        xx = socket.fromfd(x.fileno(), x.family, socket.SOCK_STREAM)
        x.close()
        yy = socket.fromfd(y.fileno(), y.family, socket.SOCK_STREAM)
        y.close()

        xx.sendall(msg)
        xx.close()
        read = yy.makefile().read()
        self.assertEqual(msg, read)
Example #9
def make_server(handler, host='localhost', port=8990):
    pool = Pool(100)
    server = StreamServer((host, port), handler, spawn=pool)

    # graceful startup
    listener_fd, worker_fds = umgmt.graceful_startup(server, 'account', accepted)
    if listener_fd is not None:
        server.set_listener(socket.fromfd(listener_fd, socket.AF_INET, socket.SOCK_STREAM))
    if worker_fds:
        for w in worker_fds:
            s = socket.fromfd(w, socket.AF_INET, socket.SOCK_STREAM)
            gevent.spawn(handler, s, None)

    return server
Example #10
def get_socket(index=0):
    """Get an Einhorn-bound socket from the environment.

    Einhorn can bind multiple sockets (via multiple -b arguments), the
    ``index`` parameter can be used to choose which socket to retrieve.  When
    sockets are bound, Einhorn provides several environment variables to child
    worker processes:

    - EINHORN_FD_COUNT: the number of sockets bound
    - EINHORN_FD_#: for each socket bound, the file descriptor for that socket
    - EINHORN_FD_FAMILY_#: for each socket bound, the protocol family of that
        socket (this is a recent addition, so if it's not present default to
        AF_INET)

    :param int index: The socket number to get.
    :rtype: :py:class:`socket.socket`

    """
    if not is_worker():
        raise NotEinhornWorker

    fd_count = get_socket_count()
    if not 0 <= index < fd_count:
        raise IndexError

    fileno = int(os.environ["EINHORN_FD_%d" % index])
    family_name = os.environ.get("EINHORN_FD_FAMILY_%d" % index, "AF_INET")
    assert family_name.startswith("AF_"), "invalid socket family name"
    family = getattr(socket, family_name)
    return socket.fromfd(fileno, family, socket.SOCK_STREAM)
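A hedged usage sketch for a worker process (assumes Einhorn exported the EINHORN_FD_* variables and that `is_worker`/`get_socket_count` from the same module are importable); Einhorn hands over sockets that are already bound and listening:

import os

# hedged sketch: serve a single connection on the first Einhorn-bound socket
listener = get_socket(0)
conn, addr = listener.accept()
conn.sendall(b'hello from worker pid %d\n' % os.getpid())
conn.close()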
Example #11
    def fileDescriptorsReceived(self, fds, message):
        if len(fds) == 1:
            fd = fds[0]

            # Note that we hardcode IPv4 here!
            sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)

            # PROBE: received fd; see porter.py
            self.debug("[fd %5d] (ts %f) received fd from %d, created socket",
                       sock.fileno(), time.time(), fd)

            # Undocumentedly (other than a comment in
            # Python/Modules/socketmodule.c), socket.fromfd() calls dup() on
            # the passed FD before it actually wraps it in a socket object.
            # So, we need to close the FD that we originally had...
            os.close(fd)

            try:
                peeraddr = sock.getpeername()
            except socket.error:
                self.info("Socket disconnected before being passed to client")
                sock.close()
                return

            # Based on bits in tcp.Port.doRead()
            addr = address._ServerFactoryIPv4Address('TCP',
                peeraddr[0], peeraddr[1])
            protocol = self.childFactory.buildProtocol(addr)

            self._connectionClass(sock, protocol, peeraddr, message)
        else:
            self.warning("Unexpected: FD-passing message with len(fds) != 1")
Example #12
    def __init__(self, listener, application=None, backlog=2048,
                 socket_type=socket.SOCK_STREAM,
                 address_family=socket.AF_INET):
        self.address_family = address_family
        self.socket_type = socket_type
        host, port = listener

        if isinstance(application, Application):
            self._server = HTTPServer(application)
        elif isinstance(application, TCPServer):
            self._server = application
        elif callable(application):
            tapp = tornado.wsgi.WSGIContainer(application)
            self._server = HTTPServer(tapp)
        else:
            raise TypeError(
                "Unsupported application type: %r" % (application,))

        if host.startswith('fd://'):
            fd = int(host.split('://')[1])
            set_close_exec(fd)
            sock = socket.fromfd(fd, address_family, socket_type)
            sock.setblocking(0)
            socks = [sock]
        elif self.address_family == socket.AF_UNIX:
            filename = host[len('unix:'):]
            sock = tornado.netutil.bind_unix_socket(filename, backlog=backlog)
            socks = [sock]
        else:
            socks = tornado.netutil.bind_sockets(
                port, host, address_family, backlog)
        self._server.add_sockets(socks)
        self.application = application
Example #13
def is_socket(fd):
    """ Determine whether the file descriptor is a socket.

        :param fd: The file descriptor to interrogate.
        :return: ``True`` iff the file descriptor is a socket; otherwise
            ``False``.

        Query the socket type of `fd`. If there is no error, the file is a
        socket.

        """
    result = False

    file_socket = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW)

    try:
        socket_type = file_socket.getsockopt(
                socket.SOL_SOCKET, socket.SO_TYPE)
    except socket.error as exc:
        exc_errno = exc.args[0]
        if exc_errno == errno.ENOTSOCK:
            # Socket operation on non-socket.
            pass
        else:
            # Some other socket error.
            result = True
    else:
        # No error getting socket type.
        result = True

    return result
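A hedged sketch of how such a probe is typically used by a daemonizer: leave an inherited socket on fd 0 (for example from inetd or socket activation) untouched, but detach a plain file or pipe.

import os
import sys

# hedged usage sketch around is_socket() above
if is_socket(sys.stdin.fileno()):
    pass                                    # keep the inherited socket open
else:
    devnull = os.open(os.devnull, os.O_RDWR)
    os.dup2(devnull, sys.stdin.fileno())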
Example #14
	def NewConnection(self, path, fd, properties):
		self.fd = fd.take()
		print("NewConnection(%s, %d)" % (path, self.fd))


		server_sock = socket.fromfd(self.fd, socket.AF_UNIX, socket.SOCK_STREAM)
		server_sock.setblocking(1)
		server_sock.send("This is Edison SPP loopback test\nAll data will be loopback\nPlease start:\n")

		try:
			while True:
				data = server_sock.recv(1024)
				print("received: %s" % data)

				server_sock.send("looping back: %s\n" % data)
				server_sock.send("OK\n")

				file_temp = open('example.txt', 'w')
				file_temp.write(data)
				file_temp.close()
		except IOError:
			pass

		server_sock.close()
		print("all done")
Example #15
 def __init__(self):
     self._send, self._recv = _UNPATCHED_SOCKETPAIR()
     # Assume monkey patching if socket.socketpair is different.
     self._patched = socket.socketpair != _UNPATCHED_SOCKETPAIR
     if self._patched:
         self._send_patched_lock = thread.allocate_lock()
         self._send_patched = socket.fromfd(self._send.fileno(),
             self._send.family, self._send.type)
         self._send_patched.settimeout(None)
         self._recv_patched_lock = thread.allocate_lock()
         self._recv_patched = socket.fromfd(self._recv.fileno(),
             self._recv.family, self._recv.type)
     self._send.settimeout(None)
     self._recv_lock = _UNPATCHED_ALLOCATE_LOCK()
     self._items = []
     self._write_byte = True
Example #16
    def _init(self):
        self._pids.clear()

        # getting the initial list of watchers/pids
        res = self.client.send_message('list')

        for watcher in res['watchers']:
            if watcher in ('circusd', 'circushttpd', 'circusd-stats'):
                # this is dealt by the special 'circus' collector
                continue

            pid_list = self.client.send_message('list', name=watcher)
            pids = pid_list.get('pids', [])
            for pid in pids:
                self._append_pid(watcher, pid)

        # getting the circus pids
        self.circus_pids = self.get_circus_pids()
        if 'circus' not in self._callbacks:
            self._add_callback('circus')
        else:
            self._callbacks['circus'].start()

        # getting the initial list of sockets
        res = self.client.send_message('listsockets')
        for sock in res.get('sockets', []):
            fd = sock['fd']
            address = '%s:%s' % (sock['host'], sock['port'])
            # XXX type / family ?
            sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
            self.sockets.append((sock, address, fd))

        self._add_callback('sockets', kind='socket')
Example #17
    def NewConnection(self, path, fd, properties):
        self.fd = fd.take()
        print("NewConnection(%s, %d)" % (path, self.fd))


        server_sock = socket.fromfd(self.fd, socket.AF_UNIX, socket.SOCK_STREAM)
        server_sock.setblocking(1)
        server_sock.send("This is Edison SPP loopback test\nAll data will be loopback\nPlease start:\n")

        try:
            while True:
                print("start recv\n")
                data = server_sock.recv(1024)
                print("received: %s" % data)
                fsensor = '/tmp/sensors'
                if os.path.isfile(fsensor):
                    print("1\n")
                    f = open(fsensor, 'r')
                    print("2\n")
                    rsp = f.readline()
                    print("3\n")
                    f.close()
                    print("rsp: %s\n" % rsp)
                    server_sock.send("%s\n" % rsp)
                    print("4\n")
                else:
                    print("sensor file not exists\n")
                    server_sock.send("z\n")
                    print("5\n")
        except IOError:
            print ("IOError")
            pass

        server_sock.close()
        print("all done")
Example #18
    def get_request_socket(self, env):
        if not self.ca:
            return None

        sock = None

        if env.get('uwsgi.version'):  # pragma: no cover
            try:
                import uwsgi
                fd = uwsgi.connection_fd()
                conn = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
                try:
                    sock = socket.socket(_sock=conn)
                except:
                    sock = conn
            except Exception as e:
                pass
        elif env.get('gunicorn.socket'):  # pragma: no cover
            sock = env['gunicorn.socket']

        if not sock:
            # attempt to find socket from wsgi.input
            input_ = env.get('wsgi.input')
            if input_:
                if hasattr(input_, '_sock'):  # pragma: no cover
                    raw = input_._sock
                    sock = socket.socket(_sock=raw)  # pragma: no cover
                elif hasattr(input_, 'raw'):
                    sock = input_.raw._sock

        return sock
    def in_q_cb(self, watcher, revents):

        try:
            val = self.in_q.get()
            #val = self.in_q.get(True,interval)
            logging.debug("ServerWorker[{0}:{1}]: Received inQ event!".format(os.getpid(),self.name))
            if type(val) == type((1,)):

                # Construct a proper socket object from the socket FD
                client_socket_handle,client_address = val
                client_fd = rebuild_handle(client_socket_handle)
                client_socket = socket.fromfd(client_fd, socket.AF_INET, socket.SOCK_STREAM)

                logging.debug("ServerWorker[{0}:{1}]: Adding connection [{2}] from [{3}].".format(os.getpid(),self.name,self.client_count,client_address))

                self.client_count += 1
                self.cnxns[client_address] = Connection(client_socket, client_address, self.loop, self.client_count, self)

                self.reset(pyev.EV_READ)

            elif type(val) == type("") and val == "quit":

                logging.info("ServerWorker[{0}:{1}]: Received quit message!".format(os.getpid(),self.name))
                self.stop()

        except Queue.Empty:
            # Timed-out, carry on
            pass
def runremote():
    """Run remote end of embedding module."""
    # get connection parameters
    params = sys.stdin.readline().split()

    if params[0] == 'unix':
        # talk to existing unix domain socket
        listensocket = socket.fromfd( int(params[1]),
                                      socket.AF_UNIX,
                                      socket.SOCK_STREAM )

    elif params[0] == 'internet':
        # talk to internet port
        listensocket = socket.socket( socket.AF_INET,
                                      socket.SOCK_STREAM )
        listensocket.connect( (params[1], int(params[2])) )

    # get secret from stdin and send back to socket
    # this is a security check
    secret = sys.stdin.readline().encode('ascii')
    EmbedApplication.writeToSocket(listensocket, secret)

    # finally start listening application
    app = EmbedApplication(listensocket, [])
    app.setQuitOnLastWindowClosed(False)
    app.exec_()
Example #21
    def _setupSocket(self):
        if self._bindAddress is None: # Run as a normal FastCGI?
            isFCGI = True

            sock = socket.fromfd(FCGI_LISTENSOCK_FILENO, socket.AF_INET,
                                 socket.SOCK_STREAM)
            try:
                sock.getpeername()
            except socket.error, e:
                if e[0] == errno.ENOTSOCK:
                    # Not a socket, assume CGI context.
                    isFCGI = False
                elif e[0] != errno.ENOTCONN:
                    raise

            # FastCGI/CGI discrimination is broken on Mac OS X.
            # Set the environment variable FCGI_FORCE_CGI to "Y" or "y"
            # if you want to run your app as a simple CGI. (You can do
            # this with Apache's mod_env [not loaded by default in OS X
            # client, ha ha] and the SetEnv directive.)
            if not isFCGI or \
               os.environ.get('FCGI_FORCE_CGI', 'N').upper().startswith('Y'):
                req = self.cgirequest_class(self)
                req.run()
                sys.exit(0)
Example #22
    def __init__(self, host, port, app, handler=None,
                 passthrough_errors=False, ssl_context=None, fd=None):
        if handler is None:
            handler = WSGIRequestHandler

        self.address_family = select_ip_version(host, port)

        if fd is not None:
            real_sock = socket.fromfd(fd, self.address_family,
                                      socket.SOCK_STREAM)
            port = 0
        HTTPServer.__init__(self, (host, int(port)), handler)
        self.app = app
        self.passthrough_errors = passthrough_errors
        self.shutdown_signal = False
        self.host = host
        self.port = port

        # Patch in the original socket.
        if fd is not None:
            self.socket.close()
            self.socket = real_sock
            self.server_address = self.socket.getsockname()

        if ssl_context is not None:
            if isinstance(ssl_context, tuple):
                ssl_context = load_ssl_context(*ssl_context)
            if ssl_context == 'adhoc':
                ssl_context = generate_adhoc_ssl_context()
            self.socket = ssl_context.wrap_socket(self.socket,
                                                  server_side=True)
            self.ssl_context = ssl_context
        else:
            self.ssl_context = None
Example #23
    def _setupSocket(self):
        if self._bindAddress is None:  # Run as a normal FastCGI?

            sock = socket.fromfd(FCGI_LISTENSOCK_FILENO, socket.AF_INET,
                                 socket.SOCK_STREAM)
            try:
                sock.getpeername()
            except OSError as e:
                if e.errno != errno.ENOTCONN:
                    raise
        else:
            # Run as a server
            oldUmask = None
            if isinstance(self._bindAddress, str):
                # Unix socket
                sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                try:
                    os.unlink(self._bindAddress)
                except OSError:
                    pass
                if self._umask is not None:
                    oldUmask = os.umask(self._umask)
            else:
                # INET socket
                assert type(self._bindAddress) is tuple
                assert len(self._bindAddress) == 2
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            close_on_exec(sock.fileno())
            sock.bind(self._bindAddress)
            sock.listen(socket.SOMAXCONN)

            if oldUmask is not None:
                os.umask(oldUmask)
        return sock
Example #24
def is_socket(fd):
    """ Determine if the file descriptor is a socket.

        Return ``False`` if querying the socket type of `fd` raises an
        error; otherwise return ``True``.

        """
    result = False

    file_socket = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW)

    try:
        file_socket.getsockopt(
            socket.SOL_SOCKET, socket.SO_TYPE)
    except socket.error as exc:
        exc_errno = exc.args[0]
        if exc_errno == errno.ENOTSOCK:
            # Socket operation on non-socket
            pass
        else:
            # Some other socket error
            result = True
    else:
        # No error getting socket type
        result = True

    return result
Example #25
 def _process_poll(self, delay):
     try:
         events = self._poll.poll(delay)
     except InterruptedError:
         return
     for fd, event in events:
         ex = None
         if event & EPOLLHUP:
             ex = OSError("socket hanging")
         if event & EPOLLERR:
             sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
             err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
             sock.close()
             if err != 0:
                 ex = OSError(err, "socket error")
         if ex:
             if fd in self._readers:
                 cb = self._readers[fd]
             elif fd in self._writers:
                 cb = self._writers[fd]
             else:
                 cb = None
             if cb:
                 # bind the loop variables now; a bare lambda would see only
                 # the values from the final iteration when it runs later
                 self._soon.append(lambda cb=cb, fd=fd, event=event: cb(fd, event))
         if event & EPOLLIN and fd in self._readers:
             cb = self._readers[fd]
             self._soon.append(lambda cb=cb, fd=fd, event=event: cb(fd, event))
         if event & EPOLLOUT and fd in self._writers:
             cb = self._writers[fd]
             self._soon.append(lambda cb=cb, fd=fd, event=event: cb(fd, event))
    def run(self):
        print ">>> "
        conn = httplib.HTTPConnection(self._host, self._port)
        self._r1 = str(random.randint(0, 1000))
        self._conn_id = random_str(8)
        url = '/'.join([self._prefix, self._r1, self._conn_id, 'xhr_streaming'])
        print "Connecting to URL: ", url
        conn.request('POST', url)
        response = conn.getresponse()
        print "connected: ", response.status
        sock = socket.fromfd(response.fileno(), socket.AF_INET, socket.SOCK_STREAM)
        data = 1
        while data:
            data = sock.recv(1)

            if data == 'o':
                print "Socket connected"
            if data == 'c':
                print "Socket disconnected"
                return
            if data == 'h':
                pass
            if data in ('m', 'a'):
                msg = sock.recv(1000)
                print "Message: ", msg

        time.sleep(0)
        print "server disconnected"
    def __init__(self, plugin, args):
        self.cmdline = False
        if len(args) == 2 and PluginRunner._is_number(args[1]):
            try:
                fd = int(args[1])
                self.tp = _transport.TransPort(
                    socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM))

                #At this point we can return errors to the client, so we can
                #inform the client if the plug-in fails to create itself
                try:
                    self.plugin = plugin()
                except Exception as e:
                    exception_info = sys.exc_info()

                    self.tp.send_error(0, -32099,
                                       'Error instantiating plug-in ' + str(e))
                    raise exception_info[1], None, exception_info[2]

            except Exception:
                error(traceback.format_exc())
                error('Plug-in exiting.')
                sys.exit(2)

        else:
            self.cmdline = True
            cmd_line_wrapper(plugin)
Example #28
    def _fromConnectedSocket(cls, fileDescriptor, addressFamily, factory, reactor):
        """
        Create a new L{Server} based on an existing connected I{SOCK_STREAM}
        socket.

        Arguments are the same as to L{Server.__init__}, except where noted.

        @param fileDescriptor: An integer file descriptor associated with a
            connected socket.  The socket must be in non-blocking mode.  Any
            additional attributes desired, such as I{FD_CLOEXEC}, must also be
            set already.

        @param addressFamily: The address family (sometimes called I{domain})
            of the existing socket.  For example, L{socket.AF_INET}.

        @return: A new instance of C{cls} wrapping the socket given by
            C{fileDescriptor}.
        """
        addressType = address.IPv4Address
        if addressFamily == socket.AF_INET6:
            addressType = address.IPv6Address
        skt = socket.fromfd(fileDescriptor, addressFamily, socket.SOCK_STREAM)
        addr = skt.getpeername()
        protocolAddr = addressType("TCP", addr[0], addr[1])
        localPort = skt.getsockname()[1]

        protocol = factory.buildProtocol(protocolAddr)
        if protocol is None:
            skt.close()
            return

        self = cls(skt, protocol, addr, None, addr[1], reactor)
        self.repstr = "<%s #%s on %s>" % (self.protocol.__class__.__name__, self.sessionno, localPort)
        protocol.makeConnection(self)
        return self
Example #29
def create_socket(host, port, family=socket.AF_INET, type=socket.SOCK_STREAM,
                  backlog=2048, blocking=True):
    if family == socket.AF_UNIX and not host.startswith('unix:'):
        raise ValueError('Your host needs to have the unix:/path form')
    if host.startswith('unix:') and family != socket.AF_UNIX:
        # forcing to unix socket family
        family = socket.AF_UNIX

    if host.startswith('fd://'):
        # just recreate the socket
        fd = int(host.split('://')[1])
        sock = socket.fromfd(fd, family, type)
    else:
        sock = socket.socket(family, type)
        if host.startswith('unix:'):
            filename = host[len('unix:'):]
            try:
                os.remove(filename)
            except OSError:
                pass
            sock.bind(filename)
        else:
            sock.bind((host, port))
        sock.listen(backlog)

    if blocking:
        sock.setblocking(1)
    else:
        sock.setblocking(0)
    return sock
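A hedged round-trip sketch of the `fd://` branch above (addresses are illustrative; `create_socket` is the function just shown):

import socket

# hedged sketch: bind once, then re-create the same listener from its fd,
# the way a re-exec'd process would with an 'fd://<n>' host string
listener = create_socket('127.0.0.1', 0)            # kernel picks a free port
reborn = create_socket('fd://%d' % listener.fileno(), None)
assert reborn.getsockname() == listener.getsockname()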
	def run(self):
		url = "/%s" % '/'.join([self.chan, self._session, self._id, "xhr_streaming"])
		if DEBUG:
			print "TRYING URL %s" % url

		if self.use_ssl:
			con = httplib.HTTPSConnection(self.host, self.port)
		else:
			con = httplib.HTTPConnection(self.host, self.port)
		
		con.request('POST', url)

		r = con.getresponse()
		self.sock = socket.fromfd(r.fileno(), socket.AF_INET, socket.SOCK_STREAM)
		
		data = 1
		while data:
			data = self.sock.recv(1)
			if data == 'o':
				#if DEBUG: print "CONNECTION!"
				pass

			if data == 'c':
				#if DEBUG: print "DISCONNECTION"
				pass

			if data in ('m', 'a'):
				msg = self.sock.recv(1000)
				self.route_annex_channel_message(msg)

		sleep(0)
		if DEBUG: print "Channel to task %s closed." % self.chan
Example #31
 def __init__(self, address_info, handler, fd, bind_and_activate=True):
     super().__init__(address_info, handler, bind_and_activate=False)
     self.socket = socket.fromfd(fd, self.address_family, self.socket_type)
     if bind_and_activate:
         # Only activate, as systemd provides ready-bound sockets.
         self.server_activate()
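A hedged construction sketch for the class above (its name is not shown in the snippet; `ActivatedHTTPServer` is a placeholder, assumed to mix this __init__ into http.server.HTTPServer, with systemd passing exactly one socket as fd 3):

import http.server

SD_LISTEN_FDS_START = 3                      # first fd handed over by systemd

# hedged sketch: the address tuple is only informational, the real socket
# comes from the inherited descriptor
server = ActivatedHTTPServer(('', 0), http.server.SimpleHTTPRequestHandler,
                             SD_LISTEN_FDS_START)
server.serve_forever()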
Example #32
 def recv_handle(conn):
     '''Receive a handle over a local connection.'''
     with socket.fromfd(conn.fileno(), socket.AF_UNIX,
                        socket.SOCK_STREAM) as s:
         return recvfds(s, 1)[0]
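recv_handle() pairs with send_handle() from the same stdlib module (multiprocessing.reduction); a hedged, Unix-only round-trip sketch in a single process:

import multiprocessing
import os
from multiprocessing import reduction

# hedged sketch: ship a pipe's write end across a Connection via SCM_RIGHTS
parent_conn, child_conn = multiprocessing.Pipe()     # socketpair-backed on Unix
read_fd, write_fd = os.pipe()
reduction.send_handle(parent_conn, write_fd, os.getpid())
received_fd = reduction.recv_handle(child_conn)
os.write(received_fd, b'fd passed ok\n')
print(os.read(read_fd, 64))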
Example #33
def create_sockets(conf, log):
    """
    Create a new socket for the given address. If the
    address is a tuple, a TCP socket is created. If it
    is a string, a Unix socket is created. Otherwise
    a TypeError is raised.
    """

    # Systemd support, use the sockets managed by systemd and passed to
    # gunicorn.
    # http://www.freedesktop.org/software/systemd/man/systemd.socket.html
    listeners = []
    if ('LISTEN_PID' in os.environ
            and int(os.environ.get('LISTEN_PID')) == os.getpid()):
        for i in range(int(os.environ.get('LISTEN_FDS', 0))):
            fd = i + SD_LISTEN_FDS_START
            try:
                sock = socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM)
                sockname = sock.getsockname()
                if isinstance(sockname, str) and sockname.startswith('/'):
                    listeners.append(UnixSocket(sockname, conf, log, fd=fd))
                elif len(sockname) == 2 and '.' in sockname[0]:
                    listeners.append(TCPSocket("%s:%s" % sockname, conf, log,
                        fd=fd))
                elif len(sockname) == 4 and ':' in sockname[0]:
                    listeners.append(TCP6Socket("[%s]:%s" % sockname[:2], conf,
                        log, fd=fd))
            except socket.error:
                pass
        del os.environ['LISTEN_PID'], os.environ['LISTEN_FDS']

        if listeners:
            log.debug('Socket activation sockets: %s',
                    ",".join([str(l) for l in listeners]))
            return listeners

    # get it only once
    laddr = conf.address

    # check ssl config early to raise the error on startup
    # only the certfile is needed since it can contains the keyfile
    if conf.certfile and not os.path.exists(conf.certfile):
        raise ValueError('certfile "%s" does not exist' % conf.certfile)

    if conf.keyfile and not os.path.exists(conf.keyfile):
        raise ValueError('keyfile "%s" does not exist' % conf.keyfile)

    # sockets are already bound
    if 'GUNICORN_FD' in os.environ:
        fds = os.environ.pop('GUNICORN_FD').split(',')
        for i, fd in enumerate(fds):
            fd = int(fd)
            addr = laddr[i]
            sock_type = _sock_type(addr)

            try:
                listeners.append(sock_type(addr, conf, log, fd=fd))
            except socket.error as e:
                if e.args[0] == errno.ENOTCONN:
                    log.error("GUNICORN_FD should refer to an open socket.")
                else:
                    raise
        return listeners

    # no sockets are bound yet, first initialization of gunicorn in this env.
    for addr in laddr:
        sock_type = _sock_type(addr)

        # If we fail to create a socket from GUNICORN_FD
        # we fall through and try and open the socket
        # normally.
        sock = None
        for i in range(5):
            try:
                sock = sock_type(addr, conf, log)
            except socket.error as e:
                if e.args[0] == errno.EADDRINUSE:
                    log.error("Connection in use: %s", str(addr))
                if e.args[0] == errno.EADDRNOTAVAIL:
                    log.error("Invalid address: %s", str(addr))
                if i < 5:
                    log.error("Retrying in 1 second.")
                    time.sleep(1)
            else:
                break

        if sock is None:
            log.error("Can't connect to %s", str(addr))
            sys.exit(1)

        listeners.append(sock)

    return listeners
Example #34
    def __init__(
        self,
        host: str,
        port: int,
        app: "WSGIApplication",
        handler: t.Optional[t.Type[WSGIRequestHandler]] = None,
        passthrough_errors: bool = False,
        ssl_context: t.Optional[_TSSLContextArg] = None,
        fd: t.Optional[int] = None,
    ) -> None:
        if handler is None:
            handler = WSGIRequestHandler

        # If the handler doesn't directly set a protocol version and
        # thread or process workers are used, then allow chunked
        # responses and keep-alive connections by enabling HTTP/1.1.
        if "protocol_version" not in vars(handler) and (self.multithread
                                                        or self.multiprocess):
            handler.protocol_version = "HTTP/1.1"

        self.host = host
        self.port = port
        self.app = app
        self.passthrough_errors = passthrough_errors

        self.address_family = address_family = select_address_family(
            host, port)
        server_address = get_sockaddr(host, int(port), address_family)

        # Remove a leftover Unix socket file from a previous run. Don't
        # remove a file that was set up by run_simple.
        if address_family == af_unix and fd is None:
            server_address = t.cast(str, server_address)

            if os.path.exists(server_address):
                os.unlink(server_address)

        # Bind and activate will be handled manually, it should only
        # happen if we're not using a socket that was already set up.
        super().__init__(
            server_address,  # type: ignore[arg-type]
            handler,
            bind_and_activate=False,
        )

        if fd is None:
            # No existing socket descriptor, do bind_and_activate=True.
            try:
                self.server_bind()
                self.server_activate()
            except BaseException:
                self.server_close()
                raise
        else:
            # Use the passed in socket directly.
            self.socket = socket.fromfd(fd, address_family, socket.SOCK_STREAM)
            self.server_address = self.socket.getsockname()

        if address_family != af_unix:
            # If port was 0, this will record the bound port.
            self.port = self.server_address[1]

        if ssl_context is not None:
            if isinstance(ssl_context, tuple):
                ssl_context = load_ssl_context(*ssl_context)
            elif ssl_context == "adhoc":
                ssl_context = generate_adhoc_ssl_context()

            self.socket = ssl_context.wrap_socket(self.socket,
                                                  server_side=True)
            self.ssl_context: t.Optional["ssl.SSLContext"] = ssl_context
        else:
            self.ssl_context = None
def main():
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--socket-path",
        default=None,
        help="Path at which the unix socket will be created. Required if the "
        "process is not started via systemd socket activation.")

    parser.add_argument(
        "--smartctl-arg",
        default=[],
        action="append",
        help="Pass an additional argument to the smartctl command. Can be "
        "specified multiple times.")

    parser.add_argument(
        "--timeout",
        default=None,
        type=int,
        help="Time in seconds to wait between connections. Defaults to "
        " infinity.")

    parser.add_argument(
        "-v",
        dest="verbosity",
        action="count",
        default=0,
    )

    args = parser.parse_args()

    logging_kwargs = {}

    sd_fds = systemd.daemon.listen_fds()
    if len(sd_fds) == 0 and args.socket_path is None:
        print(
            "not started via socket activation. --socket-path is required but "
            "not given.",
            file=sys.stderr,
        )
        sys.exit(1)
    elif len(sd_fds) > 1:
        print(
            "too many sockets ({}) passed via systemd socket"
            " activation".format(len(sd_fds), ),
            file=sys.stderr,
        )
        sys.exit(1)
    elif len(sd_fds) == 1:
        logging_kwargs["handlers"] = [
            systemd.journal.JournalHandler(),
        ]
        sock = socket.fromfd(
            sd_fds[0],
            socket.AF_UNIX,
            socket.SOCK_STREAM,
            0,
        )

        if args.timeout is not None:
            sock.settimeout(args.timeout)
    else:
        p = pathlib.Path(args.socket_path).absolute()
        if p.is_socket():
            p.unlink()

        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
        sock.bind(args.socket_path)
        sock.listen(16)

        if args.timeout is not None:
            sock.settimeout(args.timeout)

    logging.basicConfig(level={
        0: logging.ERROR,
        1: logging.WARNING,
        2: logging.INFO,
    }.get(args.verbosity, logging.DEBUG),
                        **logging_kwargs)

    while True:
        try:
            client_sock, addr = sock.accept()
        except socket.timeout:
            return

        try:
            with contextlib.closing(client_sock):
                handle_client(client_sock, args.smartctl_arg)
        except Exception as exc:
            logger.exception("while handling client")
Example #36
def fromfd(*args, **kwargs):
    return io.Socket(_socket.fromfd(*args, **kwargs))
def main():
    global interface_name

    arg1 = str(argv[1]) if 1 < len(argv) else None
    arg2 = str(argv[2]) if 2 < len(argv) else None
    no_args = (len(argv) == 1)
    one_arg = (len(argv) == 2)
    two_args = (len(argv) == 3)
    three_or_more_args = (len(argv) > 3)
    help_flag = (str(arg1) == "-h")
    interface_flag = (str(arg1) == "-i")

    if DEBUG_ARGS:
          print("arg1 = {}".format(arg1))
          print("arg2 = {}".format(arg2))
          print("no_args = {}".format(no_args))
          print("one_arg = {}".format(one_arg))
          print("two_args = {}".format(two_args))
          print("three_or_more_args = {}".format(three_or_more_args))
          print("help_flag = {}".format(help_flag))
          print("interface_flag = {}".format(interface_flag))

    if one_arg and help_flag:
          help()
          exit()

    if three_or_more_args or \
           (two_args and not interface_flag) or \
           (one_arg and not help_flag):
          usage()
          exit()

    if two_args and interface_flag:
          interface_name = argv[2]

    bpf = BPF( text=prog, debug=2 )
    is_is_filter = bpf.load_func("isis_filter", BPF.SOCKET_FILTER)

    print("binding socket to '%s'" % interface_name)
    BPF.attach_raw_socket(is_is_filter, interface_name)

    # get file descriptor of the socket previously created inside BPF.attach_raw_socket
    socket_fd = is_is_filter.sock

    #create python socket object, from the file descriptor
    sock = socket.fromfd( socket_fd, socket.AF_PACKET, socket.SOCK_RAW, 0 )
    sock.setblocking(True)

    print( "Starting to listen on socket {} -  use CTRL-C to terminate. \n".format( interface_name ))
    pcap_fp = open( 'data.pcapng', 'wb' );

    shb_opts = [ pcapng.option.ShbHardware( "Dell" ),
                 pcapng.option.ShbOs( "Ubuntu" ),
                 pcapng.option.ShbUserAppl( "IntelliJ Idea" ) ]
    shb_obj = pcapng.block.SectionHeaderBlock( shb_opts )
    shb_packed_bytes = shb_obj.pack()
    pcap_fp.write( shb_packed_bytes )  # must be 1st block

    idb_opts = [ pcapng.option.IdbName( interface_name ),
                 pcapng.option.IdbDescription( "primary interface on host" ),
                 pcapng.option.IdbSpeed( 12345 ) ]
    idb_obj = pcapng.block.InterfaceDescBlock( pcapng.linktype.LINKTYPE_ETHERNET, idb_opts )  # optional block
    pcap_fp.write( idb_obj.pack() )

    count = 0
    while True:
        if False:
            pkt_bytes = get_next_packet( socket_fd )
            dbg_print( pkt_bytes )
            pcap_fp.write( pcapng.block.SimplePacketBlock( pkt_bytes ).pack() )

        if True:
            pkt_bytes = get_next_packet( socket_fd )
            dbg_print( pkt_bytes )

            epb_opts = [ pcapng.option.EpbFlags(       [13,14,15,16] ),
                         pcapng.option.EpbHash(        'just about any hash spec can go here' ),
                         pcapng.option.EpbDropCount(   13 ) ]
            pcap_fp.write( pcapng.block.EnhancedPacketBlock( 0, pkt_bytes, len(pkt_bytes), epb_opts ).pack() )
    def run_analysis(self, master_fd):
        """This method runs the analysis thread.
        @param master_fd the main communication socket to the parent to receive logfile updates from the parent.
        @return 0 on success, e.g. normal termination via signal or 1 on error."""

        # The masterControlSocket is the socket to communicate with the master process to receive commands or logstream data. Expect
        # the parent/child communication socket on fd 3. This also duplicates the fd, so close the old one.
        self.master_control_socket = socket.fromfd(master_fd, socket.AF_UNIX,
                                                   socket.SOCK_DGRAM, 0)
        os.close(master_fd)
        self.tracked_fds_dict[
            self.master_control_socket.fileno()] = self.master_control_socket

        # Locate the real analysis configuration.
        self.analysis_context.build_analysis_pipeline()
        if self.analysis_context.atomizer_factory is None:
            print(
                'FATAL: build_analysis_pipeline() did not initialize atomizer_factory, terminating',
                file=sys.stderr)
            return 1

        real_time_triggered_components = self.analysis_context.real_time_triggered_components
        analysis_time_triggered_components = self.analysis_context.analysis_time_triggered_components

        max_memory_mb = self.analysis_context.aminer_config.config_properties.get(
            AMinerConfig.KEY_RESOURCES_MAX_MEMORY_USAGE, None)
        if max_memory_mb is not None:
            try:
                max_memory_mb = int(max_memory_mb)
                resource.setrlimit(
                    resource.RLIMIT_AS,
                    (max_memory_mb * 1024 * 1024, resource.RLIM_INFINITY))
            except ValueError:
                print('FATAL: %s must be an integer, terminating' %
                      AMinerConfig.KEY_RESOURCES_MAX_MEMORY_USAGE,
                      file=sys.stderr)
                return 1

        max_cpu_percent_usage = self.analysis_context.aminer_config.config_properties.get(
            AMinerConfig.KEY_RESOURCES_MAX_PERCENT_CPU_USAGE)
        if max_cpu_percent_usage is not None:
            try:
                max_cpu_percent_usage = int(max_cpu_percent_usage)
                # limit
                pid = os.getpid()
                package_installed_cmd = ['dpkg', '-l', 'cpulimit']
                cpulimit_cmd = [
                    'cpulimit', '-p',
                    str(pid), '-l',
                    str(max_cpu_percent_usage)
                ]

                # skipcq: BAN-B603
                with subprocess.Popen(package_installed_cmd,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.STDOUT) as out:
                    stdout, _stderr = out.communicate()

                if 'dpkg-query: no packages found matching cpulimit' in stdout.decode(
                ):
                    print(
                        'FATAL: cpulimit package must be installed, when using the property %s'
                        % AMinerConfig.KEY_RESOURCES_MAX_PERCENT_CPU_USAGE,
                        file=sys.stderr)
                    return 1
                # skipcq: BAN-B603
                _out = subprocess.Popen(cpulimit_cmd,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.STDOUT)
            except ValueError:
                print('FATAL: %s must be an integer, terminating' %
                      AMinerConfig.KEY_RESOURCES_MAX_PERCENT_CPU_USAGE,
                      file=sys.stderr)
                return 1

        # Load continuation data for last known log streams. The loaded data has to be a dictionary with repositioning information for
        # each stream. The data is used only when creating the first stream with that name.
        self.repositioning_data_dict = PersistencyUtil.load_json(
            self.persistence_file_name)
        if self.repositioning_data_dict is None:
            self.repositioning_data_dict = {}

        # A list of LogStreams where handleStream() blocked due to downstream not being able to consume the data yet.
        blocked_log_streams = []

        # Always start when number is None.
        next_real_time_trigger_time = None
        next_analysis_time_trigger_time = None

        delayed_return_status = 0
        while self.run_analysis_loop_flag:
            # Build the list of inputs to select for anew each time: the LogStream file descriptors may change due to rollover.
            input_select_fd_list = []
            output_select_fd_list = []
            for fd_handler_object in self.tracked_fds_dict.values():
                if isinstance(fd_handler_object, LogStream):
                    stream_fd = fd_handler_object.get_current_fd()
                    if stream_fd < 0:
                        continue
                    input_select_fd_list.append(stream_fd)
                elif isinstance(fd_handler_object,
                                AnalysisChildRemoteControlHandler):
                    fd_handler_object.add_select_fds(input_select_fd_list,
                                                     output_select_fd_list)
                else:
                    # This has to be a socket, just add the file descriptor.
                    input_select_fd_list.append(fd_handler_object.fileno())

            # Loop over the list in reverse order to avoid skipping elements in remove.
            for log_stream in reversed(blocked_log_streams):
                current_stream_fd = log_stream.handle_stream()
                if current_stream_fd >= 0:
                    self.tracked_fds_dict[current_stream_fd] = log_stream
                    input_select_fd_list.append(current_stream_fd)
                    blocked_log_streams.remove(log_stream)

            read_list = None
            write_list = None
            try:
                (read_list, write_list,
                 _except_list) = select.select(input_select_fd_list,
                                               output_select_fd_list, [], 1)
            except select.error as select_error:
                # Interrupting signals, e.g. for shutdown are OK.
                if select_error[0] == errno.EINTR:
                    continue
                print('Unexpected select result %s' % str(select_error),
                      file=sys.stderr)
                delayed_return_status = 1
                break
            for read_fd in read_list:
                fd_handler_object = self.tracked_fds_dict[read_fd]
                if isinstance(fd_handler_object, LogStream):
                    # Handle this LogStream. Only when downstream processing blocks, add the stream to the blocked stream list.
                    handle_result = fd_handler_object.handle_stream()
                    if handle_result < 0:
                        # No need to care if current internal file descriptor in LogStream has changed in handleStream(),
                        # this will be handled when unblocking.
                        del self.tracked_fds_dict[read_fd]
                        blocked_log_streams.append(fd_handler_object)
                    elif handle_result != read_fd:
                        # The current fd has changed, update the tracking list.
                        del self.tracked_fds_dict[read_fd]
                        self.tracked_fds_dict[
                            handle_result] = fd_handler_object
                    continue

                if isinstance(fd_handler_object,
                              AnalysisChildRemoteControlHandler):
                    try:
                        fd_handler_object.do_receive()
                    except ConnectionError as receiveException:
                        print('Unclean termination of remote control: %s' %
                              str(receiveException),
                              file=sys.stderr)
                    if fd_handler_object.is_dead():
                        del self.tracked_fds_dict[read_fd]
                    # Reading is only attempted when output buffer was already flushed. Try processing the next request to fill the output
                    # buffer for next round.
                    else:
                        fd_handler_object.do_process(self.analysis_context)
                    continue

                if fd_handler_object == self.master_control_socket:
                    self.handle_master_control_socket_receive()
                    continue

                if fd_handler_object == self.remote_control_socket:
                    # We received a remote connection, accept it unconditionally. Users should make sure, that they do not exhaust
                    # resources by hogging open connections.
                    (control_client_socket,
                     _remote_address) = self.remote_control_socket.accept()
                    # Keep track of information received via this remote control socket.
                    remote_control_handler = AnalysisChildRemoteControlHandler(
                        control_client_socket)
                    self.tracked_fds_dict[control_client_socket.fileno(
                    )] = remote_control_handler
                    continue
                raise Exception('Unhandled object type %s' %
                                type(fd_handler_object))

            for write_fd in write_list:
                fd_handler_object = self.tracked_fds_dict[write_fd]
                if isinstance(fd_handler_object,
                              AnalysisChildRemoteControlHandler):
                    buffer_flushed_flag = False
                    try:
                        buffer_flushed_flag = fd_handler_object.do_send()
                    except OSError as sendError:
                        print('Error sending data via remote control: %s' %
                              str(sendError),
                              file=sys.stderr)
                        try:
                            fd_handler_object.terminate()
                        except ConnectionError as terminateException:
                            print('Unclean termination of remote control: %s' %
                                  str(terminateException),
                                  file=sys.stderr)
                    if buffer_flushed_flag:
                        fd_handler_object.do_process(self.analysis_context)
                    if fd_handler_object.is_dead():
                        del self.tracked_fds_dict[write_fd]
                    continue
                raise Exception('Unhandled object type %s' %
                                type(fd_handler_object))

            # Handle the real time events.
            real_time = time.time()
            if next_real_time_trigger_time is None or real_time >= next_real_time_trigger_time:
                next_trigger_offset = 3600
                for component in real_time_triggered_components:
                    next_trigger_request = component.do_timer(real_time)
                    next_trigger_offset = min(next_trigger_offset,
                                              next_trigger_request)
                next_real_time_trigger_time = real_time + next_trigger_offset

            # Handle the analysis time events. The analysis time will be different when an analysis time component is registered.
            analysis_time = self.analysis_context.analysis_time
            if analysis_time is None:
                analysis_time = real_time
            if next_analysis_time_trigger_time is None or analysis_time >= next_analysis_time_trigger_time:
                next_trigger_offset = 3600
                for component in analysis_time_triggered_components:
                    next_trigger_request = component.do_timer(real_time)
                    next_trigger_offset = min(next_trigger_offset,
                                              next_trigger_request)
                next_analysis_time_trigger_time = analysis_time + next_trigger_offset

        # Analysis loop is only left on shutdown. Try to persist everything and leave.
        PersistencyUtil.persist_all()
        return delayed_return_status
Example #39
 def fd(self):
     return socket.fromfd(libnfct.nfct_fd(self), socket.AF_UNSPEC,
                          socket.SOCK_STREAM)
Example #40
 def __init__(self, fd=None):
     if fd:
         self.socket = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
     else:
         self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Example #41
            ssl.SSLContext.sslsocket_class = fingerprint_checking_SSLSocket(
                fingerprint)
        else:
            ssl.SSLSocket = fingerprint_checking_SSLSocket(fingerprint)

    certs = []
    if 'TNCC_CERTS' in os.environ:
        if asn1crypto:
            now = datetime.datetime.utcnow()
            for f in os.environ['TNCC_CERTS'].split(','):
                cert = x509cert(f.strip())
                if now < cert.not_before:
                    logging.warn('WARNING: %s is not yet valid', f)
                if now > cert.not_after:
                    logging.warn('WARNING: %s is expired', f)
                certs.append(cert)
        else:
            raise Exception(
                'TNCC_CERTS environment variable set, but asn1crypto module is not available'
            )

    # \HKEY_CURRENT_USER\Software\Juniper Networks\Device Id
    device_id = os.environ.get('TNCC_DEVICE_ID')

    t = tncc(vpn_host, device_id, funk, platform, hostname, mac_addrs, certs,
             interval, user_agent)
    sock = socket.fromfd(0, socket.AF_UNIX, socket.SOCK_SEQPACKET)
    server = tncc_server(sock, t)
    while True:
        server.process_cmd()
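The `socket.fromfd(0, socket.AF_UNIX, socket.SOCK_SEQPACKET)` call implies the launcher handed this process one end of a SOCK_SEQPACKET pair as its stdin; a hedged sketch of that parent side (script name and message are illustrative):

import socket
import subprocess
import sys

# hedged sketch: the child rebuilds its end with socket.fromfd(0, ...)
parent_end, child_end = socket.socketpair(socket.AF_UNIX, socket.SOCK_SEQPACKET)
child = subprocess.Popen([sys.executable, 'tncc_child.py'],
                         stdin=child_end.fileno())
child_end.close()
parent_end.send(b'start\n')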
Example #42
        data = f.read()
        return data


def modify(packet):
    global text_to_hide
    global byte_num

    if byte_num < len(text_to_hide):
        scapy_pkt = IP(packet.get_payload())
        scapy_pkt = prepare_message(scapy_pkt)
        packet.set_payload(bytes(scapy_pkt))

    packet.accept()


nfqueue = NetfilterQueue()
nfqueue.bind(0, modify)
s = socket.fromfd(nfqueue.get_fd(), socket.AF_UNIX, socket.SOCK_STREAM)

text_to_hide = read_file("/stegano/Antygona.txt")
byte_num = 0

try:
    nfqueue.run_socket(s)
except KeyboardInterrupt:
    print('')

s.close()
nfqueue.unbind()
Example #43
        return self.fd


    def doRead(self):
        """
        A message is ready to read.  Receive a file descriptor from our parent
        process.
        """
        try:
            fd, description = recvfd(self.fd)
        except SocketError, se:
            if se.errno != EAGAIN:
                raise
        else:
            try:
                skt = fromfd(fd, getsockfam(fd), SOCK_STREAM)
                close(fd)       # fromfd() calls dup()
                try:
                    peeraddr = skt.getpeername()
                except SocketError:
                    peeraddr = ('0.0.0.0', 0)
                protocol = self.protocolFactory.buildProtocol(peeraddr)
                transport = self.transportFactory(skt, peeraddr,
                                                  description, protocol)
                protocol.makeConnection(transport)
            except:
                log.failure("doRead()")


    def doWrite(self):
        """
Beispiel #44
0
def fromfd(fd, family, type_, proto=0):
    s = socket.fromfd(fd, family, type_, proto)
    if s.__class__ is not socket.socket:
        s = socket.socket(_sock=s)
    return s
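# A minimal, self-contained usage sketch for the compatibility wrapper above
# (it assumes the wrapper is defined in this module as "fromfd"): duplicate a
# socket's descriptor and check that the copy is an independent socket object.
if __name__ == '__main__':
    import socket

    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    dup = fromfd(listener.fileno(), socket.AF_INET, socket.SOCK_STREAM)
    # socket.fromfd() dup()s the descriptor, so the two objects use different fds
    assert dup.fileno() != listener.fileno()
    dup.close()
    listener.close()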
Beispiel #45
0
def main(latency_control, auto_hosts):
    debug1('Starting server with Python version %s\n' %
           platform.python_version())

    if helpers.verbose >= 1:
        helpers.logprefix = ' s: '
    else:
        helpers.logprefix = 'server: '
    debug1('latency control setting = %r\n' % latency_control)

    routes = list(list_routes())
    debug1('available routes:\n')
    for r in routes:
        debug1('  %d/%s/%d\n' % r)

    # synchronization header
    sys.stdout.write('\0\0SSHUTTLE0001')
    sys.stdout.flush()

    handlers = []
    mux = Mux(
        socket.fromfd(sys.stdin.fileno(), socket.AF_INET, socket.SOCK_STREAM),
        socket.fromfd(sys.stdout.fileno(), socket.AF_INET, socket.SOCK_STREAM))
    handlers.append(mux)
    routepkt = ''
    for r in routes:
        routepkt += '%d,%s,%d\n' % r
    mux.send(0, ssnet.CMD_ROUTES, b(routepkt))

    hw = Hostwatch()
    hw.leftover = b('')

    def hostwatch_ready(sock):
        assert (hw.pid)
        content = hw.sock.recv(4096)
        if content:
            lines = (hw.leftover + content).split(b('\n'))
            if lines[-1]:
                # no terminating newline: entry isn't complete yet!
                hw.leftover = lines.pop()
                lines.append(b(''))
            else:
                hw.leftover = b('')
            mux.send(0, ssnet.CMD_HOST_LIST, b('\n').join(lines))
        else:
            raise Fatal('hostwatch process died')

    def got_host_req(data):
        if not hw.pid:
            (hw.pid, hw.sock) = start_hostwatch(data.strip().split(),
                                                auto_hosts)
            handlers.append(Handler(socks=[hw.sock], callback=hostwatch_ready))

    mux.got_host_req = got_host_req

    def new_channel(channel, data):
        (family, dstip, dstport) = data.decode("ASCII").split(',', 2)
        family = int(family)
        dstport = int(dstport)
        outwrap = ssnet.connect_dst(family, dstip, dstport)
        handlers.append(Proxy(MuxWrapper(mux, channel), outwrap))

    mux.new_channel = new_channel

    dnshandlers = {}

    def dns_req(channel, data):
        debug2('Incoming DNS request channel=%d.\n' % channel)
        h = DnsProxy(mux, channel, data)
        handlers.append(h)
        dnshandlers[channel] = h

    mux.got_dns_req = dns_req

    udphandlers = {}

    def udp_req(channel, cmd, data):
        debug2('Incoming UDP request channel=%d, cmd=%d\n' % (channel, cmd))
        if cmd == ssnet.CMD_UDP_DATA:
            (dstip, dstport, data) = data.split(",", 2)
            dstport = int(dstport)
            debug2('is incoming UDP data. %r %d.\n' % (dstip, dstport))
            h = udphandlers[channel]
            h.send((dstip, dstport), data)
        elif cmd == ssnet.CMD_UDP_CLOSE:
            debug2('is incoming UDP close\n')
            h = udphandlers[channel]
            h.ok = False
            del mux.channels[channel]

    def udp_open(channel, data):
        debug2('Incoming UDP open.\n')
        family = int(data)
        mux.channels[channel] = lambda cmd, data: udp_req(channel, cmd, data)
        if channel in udphandlers:
            raise Fatal('UDP connection channel %d already open' % channel)
        else:
            h = UdpProxy(mux, channel, family)
            handlers.append(h)
            udphandlers[channel] = h

    mux.got_udp_open = udp_open

    while mux.ok:
        if hw.pid:
            assert (hw.pid > 0)
            (rpid, rv) = os.waitpid(hw.pid, os.WNOHANG)
            if rpid:
                raise Fatal('hostwatch exited unexpectedly: code 0x%04x\n' %
                            rv)

        ssnet.runonce(handlers, mux)
        if latency_control:
            mux.check_fullness()

        if dnshandlers:
            now = time.time()
            remove = []
            for channel, h in dnshandlers.items():
                if h.timeout < now or not h.ok:
                    debug3('expiring dnsreqs channel=%d\n' % channel)
                    remove.append(channel)
                    h.ok = False
            for channel in remove:
                del dnshandlers[channel]
        if udphandlers:
            remove = []
            for channel, h in udphandlers.items():
                if not h.ok:
                    debug3('expiring UDP channel=%d\n' % channel)
                    remove.append(channel)
                    h.ok = False
            for channel in remove:
                del udphandlers[channel]
Beispiel #46
0
 def reduce_connection(conn):
     handle = conn.fileno()
     with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s:
         from . import resource_sharer
         ds = resource_sharer.DupSocket(s)
         return rebuild_connection, (ds, conn.readable, conn.writable)
Beispiel #47
0
def main():
    parser = optparse.OptionParser()
    parser.add_option('-i', '--inetd',
                      help='Run once on stdin (inetd mode)',
                      action='store_true', dest='stdin', default=False)
    parser.add_option('-l', '--port',
                      help='What port to run on (default 2000)',
                      action='store', dest='port', default=None, type='int')
    parser.add_option('-b', '--bindaddr',
                      help='What IP address to bind to (default all)',
                      action='store', dest='bindaddr', default=None)
    parser.add_option('-p', '--pidfile',
                      help='Where to write a PID file',
                      action='store', dest='pidfile', default=None)
    parser.add_option('-c', '--config',
                      help='Location of config file',
                      action='store', dest='config',
                      default='/usr/local/etc/pysieved.ini')
    parser.add_option('-v', '--verbosity',
                      help="Set logging verbosity level (default 1)",
                      action='store', dest='verbosity', default=1, type='int')
    parser.add_option('-d', '--debug',
                      help='Log to stderr (implies --no-daemon)',
                      action='store_true', dest='debug', default=False)
    parser.add_option('-B', '--base',
                      help='Mail base directory',
                      action='store', dest='base', default='')
    parser.add_option('-t', '--tmpdir',
                      help='Temp directory',
                      action='store', dest='tmpdir', default='')
    parser.add_option('-T', '--tls',
                      help='STARTTLS required before authentication',
                      action='store_true', dest='tls_required', default=False)
    parser.add_option('-K', '--key',
                      help='TLS private key file',
                      action='store', dest='tls_key', default='')
    parser.add_option('-C', '--cert',
                      help='TLS certificate file',
                      action='store', dest='tls_cert', default='')
    parser.add_option('--no-daemon',
                      help='Do not daemonize (but stay in foreground)',
                      action='store_true', dest='nodaemon', default=False)
    (options, args) = parser.parse_args()

    # Read config file
    config = Config(options.config)

    port = options.port or config.getint('main', 'port', 2000)
    addr = options.bindaddr or config.get('main', 'bindaddr', '')
    pidfile = options.pidfile or config.get('main', 'pidfile',
                                            '/var/run/pysieved.pid')
    base = options.base or config.get('main', 'base', '')
    tmpdir = options.tmpdir or config.get('main', 'tmpdir', '') or \
             os.environ.get('TMPDIR', '/tmp')
    tls_required = options.tls_required or config.getboolean('TLS', 'required', False)
    tls_key = options.tls_key or config.get('TLS', 'key', '')
    tls_cert = options.tls_cert or config.get('TLS', 'cert', '')
    tls_passphrase = config.get('TLS', 'passphrase', '')

    if options.debug:
        options.nodaemon = True

    # Define the log function
    syslog.openlog('pysieved[%d]' % (os.getpid()), 0, syslog.LOG_MAIL)
    def log(l, s):
        if l <= options.verbosity:
            if options.debug:
                sys.stderr.write('%s %s\n' % ("=" * l, s))
            else:
                if l > 0:
                    lvl = syslog.LOG_NOTICE
                elif l == 0:
                    lvl = syslog.LOG_WARNING
                else:
                    lvl = syslog.LOG_ERR
                syslog.syslog(lvl, s)


    # Load TLS key and cert
    tls_privateKey = None
    tls_certChain = None
    if tls_key or tls_cert:
        # Expect to use TLS
        if not have_tls:
            log(1, "TLSLite is not available. STARTTLS will not be offered")
            tls_required = False
        elif not tls_key:
            log(1, "Cannot enable TLS without a key. STARTTLS will not be offered")
            tls_required = False
        elif not tls_cert:
            log(1, "Cannot enable TLS without a certificate. STARTTLS will not be offered")
            tls_required = False
        else:
            try:
                tls_read_cert = open(tls_cert).read()
                tls_x509 = X509()
                tls_x509.parse(tls_read_cert)
                tls_certChain = X509CertChain([tls_x509])
                tls_read_key = open(tls_key).read()

                def passphrase():
                    return tls_passphrase

                tls_privateKey = parsePEMKey(tls_read_key, private=True, passwordCallback=passphrase)
            except:
                log(1, "Failed to load TLS key or certificate. STARTTLS will not be offered.")
                tls_certChain = None
                tls_privateKey = None
                tls_required = False


    ##
    ## Import plugins
    ##
    auth = __import__('plugins.%s' % config.get('main', 'auth', 'SASL').lower(),
                      None, None, True)
    userdb = __import__('plugins.%s' % config.get('main', 'userdb', 'passwd').lower(),
                      None, None, True)
    storage = __import__('plugins.%s' % config.get('main', 'storage', 'Dovecot').lower(),
                         None, None, True)
    consumer = __import__('plugins.%s' % config.get('main', 'consumer', 'Dovecot').lower(),
                          None, None, True)


    # If the same plugin is used in two places, recycle it
    authenticate = auth.PysievedPlugin(log, config)

    if userdb == auth:
        homedir = authenticate
    else:
        homedir = userdb.PysievedPlugin(log, config)

    if storage == auth:
        store = authenticate
    elif storage == userdb:
        store = homedir
    else:
        store = storage.PysievedPlugin(log, config)

    if consumer == auth:
        consume = authenticate
    elif consumer == userdb:
        consume = homedir
    elif consumer == storage:
        consume = store
    else:
        consume = consumer.PysievedPlugin(log, config)


    class handler(managesieve.RequestHandler):
        capabilities = consume.capabilities

        def __init__(self, *args):
            self.params = {}
            managesieve.RequestHandler.__init__(self, *args)

        def log(self, l, s):
            log(l, s)

        def list_mech(self):
            mechs = authenticate.mechanisms()
            self.log(5, "Announcing mechanisms : %r" % mechs)
            return mechs

        def do_sasl_first(self, mechanism, *args):
            self.log(5, "Starting SASL authentication (%s) : %s" % (mechanism, ' '.join(args)))
            ret = authenticate.do_sasl_first(mechanism, *args)
            if ret['result'] == 'CONT':
                self.log(5, "Need more SASL authentication : %r" % ret)
            else:
                self.log(5, "Finished SASL authentication : %r" % ret)
            return ret

        def do_sasl_next(self, b64_string):
            self.log(5, "Continuing SASL authentication : %s" % b64_string)
            ret = authenticate.do_sasl_next(b64_string)
            if ret['result'] == 'CONT':
                self.log(5, "Need more SASL authentication : %r" % ret)
            else:
                self.log(5, "Finished SASL authentication : %r" % ret)
            return ret

        def authenticate(self, username, passwd):
            self.log(5, "Authenticating %s" % username)
            self.params['username'] = username
            self.params['password'] = passwd
            return authenticate.auth(self.params)

        def get_homedir(self, username):
            self.params['username'] = username
            ret = homedir.lookup(self.params)
            self.log(5, "Plugin returned home : %r" % ret)
            if ret and not os.path.isabs(ret) and base:
                ret = os.path.join(base, ret)
                self.log(5, "Added base to home : %r" % ret)
            return ret

        def new_storage(self, homedir):
            self.params['homedir'] = homedir
            return store.create_storage(self.params)

        def get_tls_params(self):
            return {'required': tls_required,
                    'key': tls_privateKey,
                    'cert': tls_certChain}

        def pre_save(self, script):
            return consume.pre_save(tmpdir, script)

        def post_load(self, script):
            return consume.post_load(script)

    if options.stdin:
        sock = socket.fromfd(0, socket.AF_INET, socket.SOCK_STREAM)
        h = handler(sock, sock.getpeername(), None)
    else:
        import daemon

        s = Server((addr, port), handler)

        if not options.nodaemon:
            daemon.daemon(pidfile=pidfile)
        log(1, 'Listening on %s port %d' % (addr or "INADDR_ANY", port))
        s.serve_forever()
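# A hedged launch sketch for the --inetd path above (service name, user and
# paths are placeholders): a nowait inetd service accepts the TCP connection
# itself and hands the connected socket to the child as fd 0, which
# socket.fromfd(0, ...) then wraps for a single MANAGESIEVE session.
#
#     sieve stream tcp nowait root /usr/local/sbin/pysieved.py pysieved.py -i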
Beispiel #48
0
def check_connection_ntuple(conn):
    """Check validity of a connection namedtuple."""
    # check ntuple
    assert len(conn) in (6, 7), conn
    has_pid = len(conn) == 7
    has_fd = getattr(conn, 'fd', -1) != -1
    assert conn[0] == conn.fd
    assert conn[1] == conn.family
    assert conn[2] == conn.type
    assert conn[3] == conn.laddr
    assert conn[4] == conn.raddr
    assert conn[5] == conn.status
    if has_pid:
        assert conn[6] == conn.pid

    # check fd
    if has_fd:
        assert conn.fd >= 0, conn
        if hasattr(socket, 'fromfd') and not WINDOWS:
            try:
                dupsock = socket.fromfd(conn.fd, conn.family, conn.type)
            except (socket.error, OSError) as err:
                if err.args[0] != errno.EBADF:
                    raise
            else:
                with contextlib.closing(dupsock):
                    assert dupsock.family == conn.family
                    assert dupsock.type == conn.type

    # check family
    assert conn.family in (AF_INET, AF_INET6, AF_UNIX), repr(conn.family)
    if conn.family in (AF_INET, AF_INET6):
        # actually try to bind the local socket; ignore IPv6
        # sockets as their address might be represented as
        # an IPv4-mapped-address (e.g. "::127.0.0.1")
        # and that's rejected by bind()
        if conn.family == AF_INET:
            s = socket.socket(conn.family, conn.type)
            with contextlib.closing(s):
                try:
                    s.bind((conn.laddr[0], 0))
                except socket.error as err:
                    if err.errno != errno.EADDRNOTAVAIL:
                        raise
    elif conn.family == AF_UNIX:
        assert conn.status == psutil.CONN_NONE, conn.status

    # check type (SOCK_SEQPACKET may happen in case of AF_UNIX socks)
    assert conn.type in (SOCK_STREAM, SOCK_DGRAM, SOCK_SEQPACKET), \
        repr(conn.type)
    if conn.type == SOCK_DGRAM:
        assert conn.status == psutil.CONN_NONE, conn.status

    # check laddr (IP address and port sanity)
    for addr in (conn.laddr, conn.raddr):
        if conn.family in (AF_INET, AF_INET6):
            assert isinstance(addr, tuple), addr
            if not addr:
                continue
            assert isinstance(addr.port, int), addr.port
            assert 0 <= addr.port <= 65535, addr.port
            check_net_address(addr.ip, conn.family)
        elif conn.family == AF_UNIX:
            assert isinstance(addr, str), addr

    # check status
    assert isinstance(conn.status, str), conn
    valids = [getattr(psutil, x) for x in dir(psutil) if x.startswith('CONN_')]
    assert conn.status in valids, conn
Beispiel #49
0
    def _setup_queue(self, queue_id, ctx, verdict_callback):
        def verdict_callback_ind(queue_handle, nfmsg, nfa, _data):
            packet_ptr = ctypes.c_void_p(0)

            # logger.debug("verdict cb for queue %d", queue_id)
            pkg_hdr = get_msg_packet_hdr(nfa)
            packet_id = ntohl(pkg_hdr.contents.packet_id)
            linklayer_protoid = htons(pkg_hdr.contents.hw_protocol)

            len_recv, data = get_full_payload(nfa, packet_ptr)

            try:
                # TODO: test this
                hw_info = get_packet_hw(nfa).contents
                hw_addrlen = ntohs(hw_info.hw_addrlen)
                hw_addr = ctypes.string_at(hw_info.hw_addr, size=hw_addrlen)
                #hw_addr = None
            except:
                # hw address not always present, eg DHCP discover -> offer...
                hw_addr = None

            data_ret, verdict = data, NF_DROP

            try:
                data_ret, verdict = verdict_callback(hw_addr,
                                                     linklayer_protoid, data,
                                                     ctx)
            except Exception as ex:
                logger.warning(
                    "Verdict callback problem, packet will be dropped: %r", ex)

            set_verdict(queue_handle, packet_id, verdict, len(data_ret),
                        ctypes.c_char_p(data_ret))

        nfq_handle = ll_open_queue()  # 2

        # TODO: what about IPv6?
        unbind_pf(nfq_handle, socket.AF_INET)
        bind_pf(nfq_handle, socket.AF_INET)

        c_handler = HANDLER(verdict_callback_ind)
        queue = create_queue(nfq_handle, queue_id, c_handler, None)  # 1

        set_mode(queue, NFQNL_COPY_PACKET, 0xFFFF)

        nf = nfnlh(nfq_handle)
        fd = nfq_fd(nf)
        # fd, family, sockettype
        nfq_socket = socket.fromfd(fd, 0, 0)  # 3
        # TODO: better solution to check for running state? close socket and raise exception does not work in stop()
        nfq_socket.settimeout(1)

        thread = threading.Thread(target=Interceptor.verdict_trigger_cycler,
                                  args=[nfq_socket.recv, nfq_handle, self])

        thread.start()

        qconfig = Interceptor.QueueConfig(queue=queue,
                                          queue_id=queue_id,
                                          nfq_handle=nfq_handle,
                                          nfq_socket=nfq_socket,
                                          verdictthread=thread,
                                          handler=c_handler)
        self._netfilterqueue_configs.append(qconfig)
Beispiel #50
0
def fromfd(*args, **kwargs):
    """Like :func:`socket.fromfd`, but returns a trio socket object.

    """
    return from_stdlib_socket(_stdlib_socket.fromfd(*args, **kwargs))
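# A minimal usage sketch for the wrapper above, assuming trio is installed and
# "fd" refers to an inherited, already-listening TCP descriptor (the helper
# name "accept_one" is only illustrative):
import trio.socket
from socket import AF_INET, SOCK_STREAM

async def accept_one(fd):
    # fromfd() duplicates the descriptor, so the caller may still close fd itself
    listener = trio.socket.fromfd(fd, AF_INET, SOCK_STREAM)
    conn, addr = await listener.accept()
    return conn, addr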
Beispiel #51
0
#!/usr/bin/env python
# encoding: utf-8
"""
@desc:
@software: pycharm
@file: in_zen2.py
@time: 2018/10/29 23:51
@author: liuzy
@contact: [email protected]
@license: (C) Copyright 2015-2018, Node Supply Chain Manager Corporation Limited.
"""
import socket, sys, zen_utils

if __name__ == '__main__':
    listener = socket.fromfd(0, socket.AF_INET, socket.SOCK_STREAM)
    sys.stdin = open('/dev/null', 'r')
    sys.stdout = sys.stderr = open('log.txt', 'a', buffering=1)
    listener.settimeout(8.0)
    try:
        zen_utils.accept_connection_forever(listener)
    except socket.timeout:
        print(f"waited 8 seconds with no further connection shuting down")
Beispiel #52
0
def fromfd(fd, family, type, proto=0):
    """Like :func:`socket.fromfd`, but returns a trio socket object.

    """
    family, type, proto = _sniff_sockopts_for_fileno(family, type, proto, fd)
    return from_stdlib_socket(_stdlib_socket.fromfd(fd, family, type, proto))
Beispiel #53
0
    def __init__(self,
                 server_address,
                 RequestHandlerClass,
                 bind_and_activate=True):
        self.remove_file = None
        self.mode = None
        self.listen_fd = get_listen_fd()

        if self.listen_fd:
            server_address = self.listen_fd
            self.address_family = socket.AF_UNIX
            self.socket = socket.fromfd(self.listen_fd, socket.AF_UNIX,
                                        socket.SOCK_STREAM)

        elif server_address.startswith("unix:"):
            self.address_family = socket.AF_UNIX
            address = server_address[5:]
            m = address.rfind(';mode=')
            if m != -1:
                self.mode = address[m + 6:]
                address = address[:m]

            if address[0] == '@':
                address = address.replace('@', '\0', 1)
                self.mode = None
            else:
                self.remove_file = address

            server_address = address
            self.socket = socket.socket(self.address_family, self.socket_type)
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

        elif server_address.startswith("tcp:"):
            address = server_address[4:]
            p = address.rfind(':')
            if p != -1:
                port = int(address[p + 1:])
                address = address[:p]
            else:
                raise ConnectionError("Invalid address 'tcp:%s'" % address)
            address = address.replace('[', '')
            address = address.replace(']', '')

            try:
                res = socket.getaddrinfo(address,
                                         port,
                                         proto=socket.IPPROTO_TCP,
                                         flags=socket.AI_NUMERICHOST)
            except TypeError:
                res = socket.getaddrinfo(address, port, self.address_family,
                                         self.socket_type, socket.IPPROTO_TCP,
                                         socket.AI_NUMERICHOST)

            af, socktype, proto, canonname, sa = res[0]
            self.address_family = af
            self.socket_type = socktype
            self.socket = socket.socket(self.address_family, self.socket_type)
            server_address = sa[0:2]

        else:
            raise ConnectionError("Invalid address '%s'" % server_address)

        BaseServer.__init__(self, server_address, RequestHandlerClass)

        if bind_and_activate:
            try:
                self.server_bind()
                self.server_activate()
            except:
                self.server_close()
                raise
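# The constructor above accepts three address forms (sketch; "Server" and
# "Handler" are placeholder names, the real class and handler are not shown):
#
#     Server("unix:/run/app/api.sock;mode=660", Handler)   # filesystem Unix socket with an access mode
#     Server("unix:@app-api", Handler)                      # abstract-namespace Unix socket ('@' becomes '\0')
#     Server("tcp:[::1]:8080", Handler)                     # TCP; brackets around IPv6 literals are stripped
#
# If get_listen_fd() finds an inherited (systemd-style) listen fd, the address
# string is ignored and the socket is rebuilt from that fd via socket.fromfd().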
Beispiel #54
0
    def wcb_handle_udp_input(self, data, fd):
        sock = socket.fromfd(fd, socket.AF_INET6, socket.SOCK_DGRAM)
        data, addr = sock.recvfrom(1024)
        host, port = socket.getnameinfo(
            addr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
        try:
            str_data = data.decode('utf-8')
        except Exception as err:
            return dlog(
                "UDP message from [%s]:%s did not decode as valid UTF-8: '%s'"
                % (host, port, data))

        # Expect at least three words of input:
        # Password TargetChannel Message[..]
        re_sult = re.match(r'^(\S+)\s+(\S+)\s+(.*)', str_data)
        if not re_sult:
            return dlog("UDP message from [%s]:%s not properly formed: '%s'" %
                        (host, port, data))

        password = re_sult.group(1)
        channel = re_sult.group(2)
        message = re_sult.group(3)

        if password != self.state['udp_listen_pass']:
            return dlog("UDP message from [%s]:%s had bad password: '******'" %
                        (host, port, data))

        if not message:
            return dlog('UDP message from [%s]:%s had empty message!' %
                        (host, port))

        # Make passing message to weechat.command() "safer" by replacing leading '/'-chars :)
        if message[0] == '/':
            message = '_' + message[1:]
        message = message.replace('\\', '\\\\')

        # Check if channel is indeed the channel, or the server name.
        # If it smells like a server name, strip of the next word from
        # str_data to use as the new channel name.
        server = 'undef'
        if not re.match('^[#&]', channel):
            server = channel
            re_sult = re.match(r'^(\S+)\s', message)
            channel = re_sult.group(1)
            if not re.match('^[#&]', channel):
                dlog("This '%s' does not look like a channel name" % channel)
                dlog(
                    "UDP message from [%s]:%s failed to properly parse: '%s'" %
                    (host, port, str_data))
                return self.weechat.WEECHAT_RC_ERROR
            message = re.sub(r'^(\S+)\s', '', message)

        if self.state['debug_udp']:
            dlog("UDP message from [%s]:%s to '%s.%s': '%s'" %
                 (host, port, server, channel, message))

        if server == 'undef':  # Search for first matching buffer in any server
            servers = self.weechat.infolist_get("irc_server", "", "")
            while self.weechat.infolist_next(servers):
                irc_server_name = self.weechat.infolist_string(servers, "name")
                target = irc_server_name + '.' + channel
                udp_output_buffer = self.weechat.buffer_search(
                    'irc', '(?i)' + target)  # (?i) case insensitive
                if udp_output_buffer:
                    self.weechat.command(udp_output_buffer, message)
                    if self.state['debug_udp']:
                        dlog("Found channel '%s' in server '%s'" %
                             (channel, irc_server_name))
                    self.weechat.infolist_free(servers)
                    return self.weechat.WEECHAT_RC_OK
            return dlog("Could not find '%s' buffer in any irc_server." %
                        (channel))

        target = server + '.' + channel
        udp_output_buffer = self.weechat.buffer_search(
            'irc', '(?i)' + target)  # (?i) case insensitive
        if udp_output_buffer:
            self.weechat.command(udp_output_buffer, message)
            return self.weechat.WEECHAT_RC_OK

        dlog("Could not find '%s' buffer in '%s' irc_server." %
             (channel, server))
        dlog("UDP message from [%s]:%s to '%s.%s': '%s'" %
             (host, port, server, channel, message))
        return self.weechat.WEECHAT_RC_ERROR
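# A hedged usage sketch for the UDP listener above: each datagram must carry
# "<password> <channel-or-server> <message...>", so it can be exercised from a
# shell with something like (address, port and password are placeholders):
#
#     printf 's3cret #chat hello from UDP' | nc -u -w1 127.0.0.1 12345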
Beispiel #55
0
# load eBPF program vxlan_filter of type SOCKET_FILTER into the kernel eBPF vm
# more info about eBPF program types http://man7.org/linux/man-pages/man2/bpf.2.html
function_vxlan_filter = bpf.load_func("vxlan_filter", BPF.SOCKET_FILTER)

# create raw socket, bind it to eth0
# attach bpf program to socket created
BPF.attach_raw_socket(function_vxlan_filter, "eno5")

ni.ifaddresses('eno2')
ip = ni.ifaddresses('eno2')[ni.AF_INET][0]['addr']

# get file descriptor of the socket previously created inside BPF.attach_raw_socket
socket_fd = function_vxlan_filter.sock

# create python socket object, from the file descriptor
sock = socket.fromfd(socket_fd, socket.PF_PACKET, socket.SOCK_RAW,
                     socket.IPPROTO_IP)

# set it as blocking socket
sock.setblocking(True)

print(
    "hosname, MachineIP   ipver     Src IP Addr     Dst IP Addr     Src_Port   Des_Port  Local_Src_Addr   Local_des_Addr     Local_Src_Port   Local_Des_Port VNI  VLANID  protocol  Packet Length "
)
count_c1 = 0

while 1:
    # retrieve raw packet from socket
    packet_str = os.read(socket_fd, 2048)

    # convert packet into bytearray
    packet_bytearray = bytearray(packet_str)
Beispiel #56
0
 def send_handle(conn, handle, destination_pid):
     '''Send a handle over a local connection.'''
     with socket.fromfd(conn.fileno(), socket.AF_UNIX,
                        socket.SOCK_STREAM) as s:
         sendfds(s, [handle])
Beispiel #57
0
 def _rebuild_socket(df, family, type, proto):
     fd = df.detach()
     return socket.fromfd(fd, family, type, proto)
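 # For context, a matching reducer might look like the sketch below (modeled on
 # the multiprocessing.reduction pattern; DupFd hands the descriptor to the
 # receiving process, which calls detach() and rebuilds the socket via fromfd()):
 #
 #     def _reduce_socket(s):
 #         df = DupFd(s.fileno())
 #         return _rebuild_socket, (df, s.family, s.type, s.proto)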
Beispiel #58
0
    async def startup(self, sockets=None):
        await self.lifespan.startup()
        if self.lifespan.should_exit:
            self.should_exit = True
            return

        config = self.config

        create_protocol = functools.partial(config.http_protocol_class,
                                            config=config,
                                            server_state=self.server_state)

        loop = asyncio.get_event_loop()

        if sockets is not None:
            # Explicitly passed a list of open sockets.
            # We use this when the server is run from a Gunicorn worker.
            self.servers = []
            for sock in sockets:
                server = await loop.create_server(create_protocol,
                                                  sock=sock,
                                                  ssl=config.ssl,
                                                  backlog=config.backlog)
                self.servers.append(server)

        elif config.fd is not None:
            # Use an existing socket, from a file descriptor.
            sock = socket.fromfd(config.fd, socket.AF_UNIX, socket.SOCK_STREAM)
            server = await loop.create_server(create_protocol,
                                              sock=sock,
                                              ssl=config.ssl,
                                              backlog=config.backlog)
            message = "Uvicorn running on socket %s (Press CTRL+C to quit)"
            logger.info(message % str(sock.getsockname()))
            self.servers = [server]

        elif config.uds is not None:
            # Create a socket using UNIX domain socket.
            uds_perms = 0o666
            if os.path.exists(config.uds):
                uds_perms = os.stat(config.uds).st_mode
            server = await loop.create_unix_server(create_protocol,
                                                   path=config.uds,
                                                   ssl=config.ssl,
                                                   backlog=config.backlog)
            os.chmod(config.uds, uds_perms)
            message = "Uvicorn running on unix socket %s (Press CTRL+C to quit)"
            logger.info(message % config.uds)
            self.servers = [server]

        else:
            # Standard case. Create a socket from a host/port pair.
            try:
                server = await loop.create_server(
                    create_protocol,
                    host=config.host,
                    port=config.port,
                    ssl=config.ssl,
                    backlog=config.backlog,
                )
            except OSError as exc:
                logger.error(exc)
                await self.lifespan.shutdown()
                sys.exit(1)
            port = config.port
            if port == 0:
                port = server.sockets[0].getsockname()[1]
            protocol_name = "https" if config.ssl else "http"
            message = "Uvicorn running on %s://%s:%d (Press CTRL+C to quit)"
            color_message = ("Uvicorn running on " +
                             click.style("%s://%s:%d", bold=True) +
                             " (Press CTRL+C to quit)")
            logger.info(
                message,
                protocol_name,
                config.host,
                port,
                extra={"color_message": color_message},
            )
            self.servers = [server]

        self.started = True
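# For reference, the branches above map onto how the server is started (hedged
# sketch; the fd number and socket path are just illustrations):
#
#     uvicorn app:app --fd 3                        # config.fd: inherited descriptor
#     uvicorn app:app --uds /tmp/uvicorn.sock       # config.uds: Unix domain socket
#     uvicorn app:app --host 127.0.0.1 --port 8000  # default host/port pair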
Beispiel #59
0
def main():
    if helpers.verbose >= 1:
        helpers.logprefix = ' s: '
    else:
        helpers.logprefix = 'server: '
    debug1('latency control setting = %r\n' % latency_control)
    debug1('send known routes = %r\n' % send_nets)

    if send_nets:
        routes = list(list_routes())
        debug1('available routes:\n')
        for r in routes:
            debug1('  %s/%d\n' % r)
    else:
        routes = list()
        debug1('not sending routes\n')
        
    # synchronization header
    sys.stdout.write('\0\0SSHUTTLE0001')
    sys.stdout.flush()

    handlers = []
    mux = Mux(socket.fromfd(sys.stdin.fileno(),
                            socket.AF_INET, socket.SOCK_STREAM),
              socket.fromfd(sys.stdout.fileno(),
                            socket.AF_INET, socket.SOCK_STREAM))
    handlers.append(mux)
    routepkt = ''
    for r in routes:
        routepkt += '%s,%d\n' % r
    mux.send(0, ssnet.CMD_ROUTES, routepkt)

    hw = Hostwatch()
    hw.leftover = ''
        
    def hostwatch_ready():
        assert(hw.pid)
        content = hw.sock.recv(4096)
        if content:
            lines = (hw.leftover + content).split('\n')
            if lines[-1]:
                # no terminating newline: entry isn't complete yet!
                hw.leftover = lines.pop()
                lines.append('')
            else:
                hw.leftover = ''
            mux.send(0, ssnet.CMD_HOST_LIST, '\n'.join(lines))
        else:
            raise Fatal('hostwatch process died')

    def got_host_req(data):
        if not hw.pid:
            (hw.pid,hw.sock) = start_hostwatch(data.strip().split())
            handlers.append(Handler(socks = [hw.sock],
                                    callback = hostwatch_ready))
    mux.got_host_req = got_host_req

    def new_channel(channel, data):
        (dstip,dstport) = data.split(',', 1)
        dstport = int(dstport)
        outwrap = ssnet.connect_dst(dstip,dstport)
        handlers.append(Proxy(MuxWrapper(mux, channel), outwrap))
    mux.new_channel = new_channel

    dnshandlers = {}
    def dns_req(channel, data):
        debug2('Incoming DNS request.\n')
        h = DnsProxy(mux, channel, data)
        handlers.append(h)
        dnshandlers[channel] = h
    mux.got_dns_req = dns_req

    while mux.ok:
        if hw.pid:
            assert(hw.pid > 0)
            (rpid, rv) = os.waitpid(hw.pid, os.WNOHANG)
            if rpid:
                raise Fatal('hostwatch exited unexpectedly: code 0x%04x\n' % rv)
        
        ssnet.runonce(handlers, mux)
        if latency_control:
            mux.check_fullness()
        mux.callback()

        if dnshandlers:
            now = time.time()
            for channel,h in dnshandlers.items():
                if h.timeout < now or not h.ok:
                    del dnshandlers[channel]
                    h.ok = False
Beispiel #60
0
    def connect(self,
                host,
                port=830,
                timeout=None,
                unknown_host_cb=default_unknown_host_cb,
                username=None,
                password=None,
                key_filename=None,
                allow_agent=True,
                hostkey_verify=True,
                look_for_keys=True,
                ssh_config=None,
                sock_fd=None):
        """Connect via SSH and initialize the NETCONF session. First attempts the publickey authentication method and then password authentication.

        To disable attempting publickey authentication altogether, call with *allow_agent* and *look_for_keys* as `False`.

        *host* is the hostname or IP address to connect to

        *port* is by default 830, but some devices use the default SSH port of 22 so this may need to be specified

        *timeout* is an optional timeout for socket connect

        *unknown_host_cb* is called when the server host key is not recognized. It takes two arguments, the hostname and the fingerprint (see the signature of :func:`default_unknown_host_cb`)

        *username* is the username to use for SSH authentication

        *password* is the password used if using password authentication, or the passphrase to use for unlocking keys that require it

        *key_filename* is a filename where the private key to be used can be found

        *allow_agent* enables querying SSH agent (if found) for keys

        *hostkey_verify* enables hostkey verification from ~/.ssh/known_hosts

        *look_for_keys* enables looking in the usual locations for ssh keys (e.g. :file:`~/.ssh/id_*`)

        *ssh_config* enables parsing of an OpenSSH configuration file, if set to its path, e.g. :file:`~/.ssh/config` or to True (in this case, use :file:`~/.ssh/config`).

        *sock_fd* is an already open socket which shall be used for this connection. Useful for NETCONF outbound ssh. Use host=None together with a valid sock_fd number
        """
        if not (host or sock_fd):
            raise SSHError("Missing host or socket fd")

        # Optionally, parse .ssh/config
        config = {}
        if ssh_config is True:
            ssh_config = "~/.ssh/config" if sys.platform != "win32" else "~/ssh/config"
        if ssh_config is not None:
            config = paramiko.SSHConfig()
            config.parse(open(os.path.expanduser(ssh_config)))
            config = config.lookup(host)
            host = config.get("hostname", host)
            if username is None:
                username = config.get("user")
            if key_filename is None:
                key_filename = config.get("identityfile")

        if username is None:
            username = getpass.getuser()

        if sock_fd is None:
            if config.get("proxycommand"):
                sock = paramiko.proxy.ProxyCommand(config.get("proxycommand"))
            else:
                for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
                                              socket.SOCK_STREAM):
                    af, socktype, proto, canonname, sa = res
                    try:
                        sock = socket.socket(af, socktype, proto)
                        sock.settimeout(timeout)
                    except socket.error:
                        continue
                    try:
                        sock.connect(sa)
                    except socket.error:
                        sock.close()
                        continue
                    break
                else:
                    raise SSHError("Could not open socket to %s:%s" %
                                   (host, port))
        else:
            if sys.version_info[0] < 3:
                s = socket.fromfd(int(sock_fd), socket.AF_INET,
                                  socket.SOCK_STREAM)
                sock = socket.socket(socket.AF_INET,
                                     socket.SOCK_STREAM,
                                     _sock=s)
            else:
                sock = socket.fromfd(int(sock_fd), socket.AF_INET,
                                     socket.SOCK_STREAM)
            sock.settimeout(timeout)

        t = self._transport = paramiko.Transport(sock)
        t.set_log_channel(logger.name)
        if config.get("compression") == 'yes':
            t.use_compression()

        try:
            t.start_client()
        except paramiko.SSHException:
            raise SSHError('Negotiation failed')

        # host key verification
        server_key = t.get_remote_server_key()

        fingerprint = _colonify(hexlify(server_key.get_fingerprint()))

        if hostkey_verify:
            known_host = self._host_keys.check(host, server_key)
            if not known_host and not unknown_host_cb(host, fingerprint):
                raise SSHUnknownHostError(host, fingerprint)

        if key_filename is None:
            key_filenames = []
        elif isinstance(key_filename, (str, bytes)):
            key_filenames = [key_filename]
        else:
            key_filenames = key_filename

        self._auth(username, password, key_filenames, allow_agent,
                   look_for_keys)

        self._connected = True  # there was no error authenticating
        # TODO: leopoul: Review, test, and if needed rewrite this part
        subsystem_names = self._device_handler.get_ssh_subsystem_names()
        for subname in subsystem_names:
            c = self._channel = self._transport.open_session()
            self._channel_id = c.get_id()
            channel_name = "%s-subsystem-%s" % (subname, str(self._channel_id))
            c.set_name(channel_name)
            try:
                c.invoke_subsystem(subname)
            except paramiko.SSHException as e:
                logger.info("%s (subsystem request rejected)", e)
                handle_exception = self._device_handler.handle_connection_exceptions(
                    self)
                # Ignore the exception, since we continue to try the different
                # subsystem names until we find one that can connect.
                #have to handle exception for each vendor here
                if not handle_exception:
                    continue
            self._channel_name = c.get_name()
            self._post_connect()
            return
        raise SSHError(
            "Could not open connection, possibly due to unacceptable"
            " SSH subsystem name.")