    def test_expectedFDs(self):
        """
        L{_listOpenFDs} lists expected file descriptors.
        """
        openfds = process._listOpenFDs()
        for f in sys.stdin, sys.stdout, sys.stderr:
            self.assertIn(f.fileno(), openfds)

        # See http://twistedmatrix.com/trac/ticket/4522#comment:17
        f = open(os.devnull)
        fd = os.dup(f.fileno())
        try:
            f.close()
            self.assertIn(fd, process._listOpenFDs())
        finally:
            os.close(fd)
        self.assertNotIn(fd, process._listOpenFDs())
    def test_expectedFDs(self):
        """
        L{_listOpenFDs} lists expected file descriptors.
        """
        # This is a tricky test.  A priori, there is no way to know what file
        # descriptors are open now, so there is no way to know what _listOpenFDs
        # should return.  Work around this by creating some new file descriptors
        # which we can know the state of and then just making assertions about
        # their presence or absence in the result.

        # Expect a file we just opened to be listed.
        f = open(os.devnull)
        openfds = process._listOpenFDs()
        self.assertIn(f.fileno(), openfds)

        # Expect a file we just closed not to be listed - with a caveat.  The
        # implementation may need to open a file to discover the result.  That
        # open file descriptor will be allocated the same number as the one we
        # just closed.  So, instead, create a hole in the file descriptor space
        # to catch that internal descriptor and make the assertion about a
        # different closed file descriptor.

        # This gets allocated a file descriptor larger than f's, since nothing
        # has been closed since we opened f.
        fd = os.dup(f.fileno())

        # But sanity check that; if it fails the test is invalid.
        self.assertTrue(
            fd > f.fileno(),
            "Expected duplicate file descriptor to be greater than original",
        )

        try:
            # Get rid of the original, creating the hole.  The copy should still
            # be open, of course.
            f.close()
            self.assertIn(fd, process._listOpenFDs())
        finally:
            # Get rid of the copy now
            os.close(fd)
        # And it should not appear in the result.
        self.assertNotIn(fd, process._listOpenFDs())
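The caveat described in the comments above comes from how the open-descriptor list can be discovered in the first place: on many platforms the cheapest route is to read a directory such as /dev/fd or /proc/self/fd, and opening that directory consumes a file descriptor of its own. A minimal sketch of that approach, assuming a Linux-style /proc filesystem (listOpenFDs is a hypothetical stand-in, not Twisted's internal implementation):

import os

def listOpenFDs():
    # Reading /proc/self/fd yields one entry per open descriptor.  The
    # directory handle used by listdir() is itself an open descriptor
    # while this runs - exactly the internal descriptor the test above
    # accounts for with its descriptor "hole".
    return [int(name) for name in os.listdir("/proc/self/fd")]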
    def setUp(self):
        self.openSockets = []
        if resource is not None:
            # On some buggy platforms we might leak FDs, and the test will
            # fail creating the initial two sockets we *do* want to
            # succeed. So, we make the soft limit the current number of fds
            # plus two more (for the two sockets we want to succeed). If we've
            # leaked too many fds for that to work, there's nothing we can
            # do.
            from twisted.internet.process import _listOpenFDs
            newLimit = len(_listOpenFDs()) + 2
            self.originalFileLimit = resource.getrlimit(resource.RLIMIT_NOFILE)
            resource.setrlimit(
                resource.RLIMIT_NOFILE, (newLimit, self.originalFileLimit[1]))
            self.socketLimit = newLimit + 100
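Since setUp lowers the soft RLIMIT_NOFILE, a matching tearDown has to restore the saved limit or every later test in the run inherits the tiny one. A possible counterpart, sketched under the assumption that the same openSockets and originalFileLimit attributes are used (not necessarily Twisted's exact code):

    def tearDown(self):
        # Release any sockets the test opened before touching the limit.
        while self.openSockets:
            self.openSockets.pop().close()
        if resource is not None:
            # The soft limit may never exceed the current hard limit, so
            # clamp the saved soft limit before restoring it.
            currentHard = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
            newSoft = min(self.originalFileLimit[0], currentHard)
            resource.setrlimit(
                resource.RLIMIT_NOFILE, (newSoft, currentHard))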
    def test_openFDs(self):
        """
        File descriptors returned by L{_listOpenFDs} are mostly open.

        This test assumes that C{fcntl(fd, F_GETFL)} fails with EBADF on
        closed file descriptors.
        """
        for fd in process._listOpenFDs():
            try:
                fcntl.fcntl(fd, fcntl.F_GETFL)
            except IOError as err:
                self.assertEqual(
                    errno.EBADF, err.errno,
                    "fcntl(%d, F_GETFL) failed with unexpected errno %d" % (
                        fd, err.errno))
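The EBADF behaviour this test leans on is easy to demonstrate standalone; a short sketch (on Python 3, IOError is an alias of OSError, so OSError is caught here):

import errno
import fcntl
import os

# Duplicate stdin to obtain a descriptor we fully control, then close it.
fd = os.dup(0)
os.close(fd)
try:
    fcntl.fcntl(fd, fcntl.F_GETFL)
except OSError as err:
    # fcntl() on a closed descriptor fails with EBADF.
    assert err.errno == errno.EBADF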
import gc
import os
import sys
import traceback

# _listOpenFDs is the helper exercised by the tests above; it is assumed
# here to come from twisted.internet.process.  ProcessForked is assumed to
# be an exception class defined elsewhere in the surrounding module.
from twisted.internet.process import _listOpenFDs


def fork(uid=None, gid=None):
    settingUID = (uid is not None) or (gid is not None)
    # Disable the garbage collector across the fork; a collection running
    # in the child before it execs or exits could touch state shared with
    # the parent.
    collectorEnabled = gc.isenabled()
    gc.disable()

    if settingUID:
        curegid = os.getegid()
        currgid = os.getgid()
        cureuid = os.geteuid()
        curruid = os.getuid()
        if uid is None:
            uid = cureuid
        if gid is None:
            gid = curegid

        # Prepare to change UID in subprocess.
        os.setuid(0)
        os.setgid(0)

    try:
        pid = os.fork()
    except:
        # Still in the parent process.
        if settingUID:
            os.setregid(currgid, curegid)
            os.setreuid(curruid, cureuid)
        if collectorEnabled:
            gc.enable()
        raise
    else:
        if pid == 0:  # pid is 0 in the child process
            # do not put *ANY* code outside the try block. The
            # child process must either exec or _exit. If it gets
            # outside this block (due to an exception that is not
            # handled here, but which might be handled higher up),
            # there will be two copies of the parent running in
            # parallel, doing all kinds of damage.

            # After each change to this code, review it to make sure there
            # are no exit paths.
            try:
                # Stop debugging. If I am, I don't care anymore.
                sys.settrace(None)

                # Close every descriptor inherited from the parent except
                # stdin, stdout, and stderr.
                for fd in _listOpenFDs():
                    if fd > 2:
                        try:
                            os.close(fd)
                        except:
                            pass
            except:
                try:
                    stderr = os.fdopen(2, 'w')
                    traceback.print_exc(file=stderr)
                    stderr.flush()
                    for fd in range(3):
                        os.close(fd)
                except:
                    pass  # make *sure* the child terminates
            else:
                # Still in the child: signal the caller that the fork
                # succeeded; the caller is expected to exec or _exit.
                raise ProcessForked()

            # Did you read the comment about not adding code here?
            os._exit(1)

    # we are now in parent process
    if settingUID:
        os.setregid(currgid, curegid)
        os.setreuid(curruid, cureuid)

    if collectorEnabled:
        gc.enable()

    return pid
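Because the child signals a successful fork by raising ProcessForked instead of returning, a caller has to catch that exception to run child-side code. A hedged usage sketch (the echo command line is arbitrary):

try:
    pid = fork()
except ProcessForked:
    # We are in the child: exec or _exit, never fall through.
    try:
        os.execvp("echo", ["echo", "hello from the child"])
    finally:
        os._exit(1)
else:
    # We are in the parent: reap the child.
    os.waitpid(pid, 0)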