Example No. 1
def _machine_id():
    """
    for informational purposes, try to get a machine unique id thing
    """
    if platform.isLinux():
        try:
            # why this? see: http://0pointer.de/blog/projects/ids.html
            with open('/var/lib/dbus/machine-id', 'r') as f:
                return f.read().strip()
        except:
            # Non-dbus using Linux, get a hostname
            return socket.gethostname()

    elif platform.isMacOSX():
        # Get the serial number of the platform
        import plistlib
        plist_data = subprocess.check_output(
            ["ioreg", "-rd1", "-c", "IOPlatformExpertDevice", "-a"])

        if six.PY2:
            # Only API on 2.7
            return plistlib.readPlistFromString(
                plist_data)["IOPlatformSerialNumber"]
        else:
            # New, non-deprecated 3.4+ API
            return plistlib.loads(plist_data)[0]["IOPlatformSerialNumber"]

    else:
        # Something else, just get a hostname
        return socket.gethostname()
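
The snippet above is taken out of its module, so its imports are not shown. A minimal sketch of what a self-contained version would need, plus a call site (the exact import list in the original project may differ):

# Sketch only: module-level imports the function above appears to rely on.
import socket
import subprocess

import six
from twisted.python.runtime import platform


if __name__ == '__main__':
    # Print the best-effort machine identifier for this host.
    print(_machine_id())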
Example No. 2
def _getInstallFunction(platform):
    """
    Return a function to install the reactor most suited for the given platform.

    @param platform: The platform for which to select a reactor.
    @type platform: L{twisted.python.runtime.Platform}

    @return: A zero-argument callable which will install the selected
        reactor.
    """
    # Linux: epoll(7) is the default, since it scales well.
    #
    # macOS: poll(2) is not exposed by Python because it doesn't support all
    # file descriptors (in particular, lack of PTY support is a problem) --
    # see <http://bugs.python.org/issue5154>. kqueue has the same restrictions
    # as poll(2) as far as PTY support goes.
    #
    # Windows: IOCP should eventually be default, but still has some serious
    # bugs, e.g. <http://twistedmatrix.com/trac/ticket/4667>.
    #
    # We therefore choose epoll(7) on Linux, poll(2) on other non-macOS POSIX
    # platforms, and select(2) everywhere else.
    try:
        if platform.isLinux():
            try:
                from twisted.internet.epollreactor import install
            except ImportError:
                from twisted.internet.pollreactor import install
        elif platform.getType() == 'posix' and not platform.isMacOSX():
            from twisted.internet.pollreactor import install
        else:
            from twisted.internet.selectreactor import install
    except ImportError:
        from twisted.internet.selectreactor import install
    return install
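
A hypothetical call-site sketch (not part of the original file): the returned installer has to run before twisted.internet.reactor is imported anywhere else, because only one reactor may be installed per process.

# Sketch: pick and install the reactor best suited to this platform.
from twisted.python.runtime import platform

install = _getInstallFunction(platform)
install()

from twisted.internet import reactor  # now bound to the reactor just installed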
Example No. 3
def _getInstallFunction(platform):
    """
    Return a function to install the reactor most suited for the given platform.

    @param platform: The platform for which to select a reactor.
    @type platform: L{twisted.python.runtime.Platform}

    @return: A zero-argument callable which will install the selected
        reactor.
    """
    # Linux: Once <http://twistedmatrix.com/trac/ticket/4429> is fixed
    # epoll should be the default.
    #
    # OS X: poll(2) is not exposed by Python because it doesn't
    # support all file descriptors (in particular, lack of PTY support
    # is a problem) -- see <http://bugs.python.org/issue5154>. kqueue
    # reactor is being rewritten (see
    # <http://twistedmatrix.com/trac/ticket/1918>), and also has the same
    # restriction as poll(2) as far as PTY support goes.
    #
    # Windows: IOCP should eventually be default, but still has a few
    # remaining bugs,
    # e.g. <http://twistedmatrix.com/trac/ticket/4667>.
    #
    # We therefore choose poll(2) on non-OS X POSIX platforms, and
    # select(2) everywhere else.
    if platform.getType() == 'posix' and not platform.isMacOSX():
        from twisted.internet.pollreactor import install
    else:
        from twisted.internet.selectreactor import install
    return install
Example No. 4
File: default.py Project: 0004c/VTK
def _getInstallFunction(platform):
    """
    Return a function to install the reactor most suited for the given platform.

    @param platform: The platform for which to select a reactor.
    @type platform: L{twisted.python.runtime.Platform}

    @return: A zero-argument callable which will install the selected
        reactor.
    """
    # Linux: epoll(7) is the default, since it scales well.
    #
    # OS X: poll(2) is not exposed by Python because it doesn't support all
    # file descriptors (in particular, lack of PTY support is a problem) --
    # see <http://bugs.python.org/issue5154>. kqueue has the same restrictions
    # as poll(2) as far as PTY support goes.
    #
    # Windows: IOCP should eventually be default, but still has some serious
    # bugs, e.g. <http://twistedmatrix.com/trac/ticket/4667>.
    #
    # We therefore choose epoll(7) on Linux, poll(2) on other non-OS X POSIX
    # platforms, and select(2) everywhere else.
    try:
        if platform.isLinux():
            try:
                from twisted.internet.epollreactor import install
            except ImportError:
                from twisted.internet.pollreactor import install
        elif platform.getType() == 'posix' and not platform.isMacOSX():
            from twisted.internet.pollreactor import install
        else:
            from twisted.internet.selectreactor import install
    except ImportError:
        from twisted.internet.selectreactor import install
    return install
Example No. 5
def _machine_id():
    """
    for informational purposes, try to get a machine unique id thing
    """
    if platform.isLinux():
        try:
            # why this? see: http://0pointer.de/blog/projects/ids.html
            with open('/var/lib/dbus/machine-id', 'r') as f:
                return f.read().strip()
        except:
            # Non-dbus using Linux, get a hostname
            return socket.gethostname()

    elif platform.isMacOSX():
        # Get the serial number of the platform
        import plistlib
        plist_data = subprocess.check_output(["ioreg", "-rd1", "-c", "IOPlatformExpertDevice", "-a"])

        if six.PY2:
            # Only API on 2.7
            return plistlib.readPlistFromString(plist_data)[0]["IOPlatformSerialNumber"]
        else:
            # New, non-deprecated 3.4+ API
            return plistlib.loads(plist_data)[0]["IOPlatformSerialNumber"]

    else:
        # Something else, just get a hostname
        return socket.gethostname()
Example No. 6
def getreciever(addr):
    if platform.isLinux():
        try:
            return epolludprecieve(addr)
        except:
            return polludprecieve(addr)

    elif platform.getType() == 'posix' and not platform.isMacOSX():
        return polludprecieve(addr)

    elif platform.isMacOSX():
        try:
            return kqueueudprecieve(addr)
        except:
            return polludprecieve(addr)
    else:
        return polludprecieve(addr)
Example No. 7
def _get_reactor(platform):
    try:
        if platform.isLinux():
            try:
                from twisted.internet import epollreactor
                cls = epollreactor.EPollReactor
            except ImportError:
                from twisted.internet import pollreactor
                cls = pollreactor.PollReactor
        elif platform.isMacOSX():
            from twisted.internet import kqreactor
            cls = kqreactor.KQueueReactor
        elif platform.getType() == 'posix' and not platform.isMacOSX():
            from twisted.internet import pollreactor
            cls = pollreactor.PollReactor
        else:
            from twisted.internet import selectreactor
            cls = selectreactor.SelectReactor
    except ImportError:
        from twisted.internet import selectreactor
        cls = selectreactor.SelectReactor
    return cls()
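
Unlike _getInstallFunction, this helper returns a reactor instance rather than an installer. A hedged sketch of how such an instance might be put into service (the original call site is not shown on this page):

# Sketch: build the platform-appropriate reactor and register it globally.
from twisted.python.runtime import platform
from twisted.internet.main import installReactor

reactor = _get_reactor(platform)
installReactor(reactor)  # raises an error if a reactor was already installed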
Example No. 8
class PTYProcessTestsBuilder(ProcessTestsBuilderBase):
    """
    Builder defining tests relating to L{IReactorProcess} for child processes
    which have a PTY.
    """
    usePTY = True

    if platform.isWindows():
        skip = "PTYs are not supported on Windows."
    elif platform.isMacOSX():
        skippedReactors = {
            "twisted.internet.pollreactor.PollReactor":
                "OS X's poll() does not support PTYs"}
Example No. 9
def _get_reactor(platform):
    try:
        if platform.isLinux():
            try:
                from twisted.internet import epollreactor
                cls = epollreactor.EPollReactor
            except ImportError:
                from twisted.internet import pollreactor
                cls = pollreactor.PollReactor
        elif platform.isMacOSX():
            from twisted.internet import kqreactor
            cls = kqreactor.KQueueReactor
        elif platform.getType() == 'posix' and not platform.isMacOSX():
            from twisted.internet import pollreactor
            cls = pollreactor.PollReactor
        else:
            from twisted.internet import selectreactor
            cls = selectreactor.SelectReactor
    except ImportError:
        from twisted.internet import selectreactor
        cls = selectreactor.SelectReactor
    return cls()
Example No. 10
    def fileStoreFromPath(cls, path):
        """
        @param path: a path pointing at the document root, where the file-based
            data-store is located.
        @type path: L{CachingFilePath}
        """

        # Legacy: old file store only ever used these two top-level paths
        for homeType in ("calendars", "addressbooks"):
            if path.child(homeType).exists():
                if platform.isMacOSX():
                    appropriateStoreClass = XattrPropertyStore
                else:
                    attrs = xattr.xattr(path.path)
                    try:
                        attrs.get('user.should-not-be-set')
                    except IOError, ioe:
                        if ioe.errno == errno.ENODATA:
                            # xattrs are supported and enabled on the filesystem
                            # where the calendar data lives.  this takes some
                            # doing (you have to edit fstab), so this means
                            # we're trying to migrate some 2.x data from a
                            # previous linux installation.
                            appropriateStoreClass = XattrPropertyStore
                        elif ioe.errno == errno.EOPNOTSUPP:
                            # The operation wasn't supported.  This is what will
                            # usually happen on a naively configured filesystem,
                            # so this means we're most likely trying to migrate
                            # some data from an untarred archive created on an
                            # OS X installation using xattrs.
                            appropriateStoreClass = AppleDoubleStore
                        else:
                            # No need to check for ENOENT and the like; we just
                            # checked above to make sure the parent exists.
                            # Other errors are not anticipated here, so fail
                            # fast.
                            raise

                    else:
                        # No IOError from the probe; fall back to the
                        # AppleDouble-based store.
                        appropriateStoreClass = AppleDoubleStore

                from txdav.common.datastore.file import CommonDataStore as FileStore
                return FileStore(path,
                                 None,
                                 None,
                                 True,
                                 True,
                                 propertyStoreClass=appropriateStoreClass)
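
The non-macOS branch above decides between property-store classes by probing for extended-attribute support: it reads a deliberately absent attribute and inspects the errno. The same technique in isolation, as a Python 3 style sketch (it assumes the third-party xattr package used above; errno behaviour varies by filesystem):

# Sketch: report whether a path's filesystem has working extended attributes.
import errno
import xattr

def supports_xattrs(path):
    try:
        xattr.xattr(path).get('user.should-not-be-set')
    except IOError as ioe:
        if ioe.errno == errno.ENODATA:
            return True    # attribute is simply missing, so xattrs work
        if ioe.errno == errno.EOPNOTSUPP:
            return False   # the filesystem does not support xattrs
        raise              # anything else is unexpected here
    return True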
Example No. 11
    def fileStoreFromPath(cls, path):
        """
        @param path: a path pointing at the document root, where the file-based
            data-store is located.
        @type path: L{CachingFilePath}
        """

        # TODO: TOPPATHS should be computed based on enabled flags in 'store',
        # not hard coded.
        for homeType in TOPPATHS:
            if path.child(homeType).exists():
                if platform.isMacOSX():
                    appropriateStoreClass = XattrPropertyStore
                else:
                    attrs = xattr.xattr(path.path)
                    try:
                        attrs.get('user.should-not-be-set')
                    except IOError, ioe:
                        if ioe.errno == errno.ENODATA:
                            # xattrs are supported and enabled on the filesystem
                            # where the calendar data lives.  this takes some
                            # doing (you have to edit fstab), so this means
                            # we're trying to migrate some 2.x data from a
                            # previous linux installation.
                            appropriateStoreClass = XattrPropertyStore
                        elif ioe.errno == errno.EOPNOTSUPP:
                            # The operation wasn't supported.  This is what will
                            # usually happen on a naively configured filesystem,
                            # so this means we're most likely trying to migrate
                            # some data from an untarred archive created on an
                            # OS X installation using xattrs.
                            appropriateStoreClass = AppleDoubleStore
                        else:
                            # No need to check for ENOENT and the like; we just
                            # checked above to make sure the parent exists.
                            # Other errors are not anticipated here, so fail
                            # fast.
                            raise

                    else:
                        # No IOError from the probe; fall back to the
                        # AppleDouble-based store.
                        appropriateStoreClass = AppleDoubleStore

                return FileStore(
                    path, None, None, True, True,
                    propertyStoreClass=appropriateStoreClass)
Example No. 12
class Gtk3ReactorTests(TestCase):
    """
    Tests for L{gtk3reactor}.
    """

    def test_requiresDISPLAY(self):
        """
        On X11, L{gtk3reactor} is unimportable if the C{DISPLAY} environment
        variable is not set.
        """
        display = os.environ.get("DISPLAY", None)
        if display is not None:
            self.addCleanup(os.environ.__setitem__, "DISPLAY", display)
            del os.environ["DISPLAY"]
        with SetAsideModule("twisted.internet.gtk3reactor"):
            exc = self.assertRaises(ImportError,
                                    __import__, "twisted.internet.gtk3reactor")
            self.assertEqual(
                exc.args[0],
                "Gtk3 requires X11, and no DISPLAY environment variable is set")

    if platform.getType() != "posix" or platform.isMacOSX():
        test_requiresDISPLAY.skip = "This test is only relevant when using X11"
Example No. 13
def machine_id() -> str:
    """
    For informational purposes, get a unique ID or serial for this machine (device).

    :returns: Unique machine (device) ID (serial), e.g. ``81655b901e334fc1ad59cbf2719806b7``.
    """
    from twisted.python.runtime import platform

    if platform.isLinux():
        try:
            # why this? see: http://0pointer.de/blog/projects/ids.html
            with open('/var/lib/dbus/machine-id', 'r') as f:
                return f.read().strip()
        except:
            # Non-dbus using Linux, get a hostname
            return socket.gethostname()
    elif platform.isMacOSX():
        import plistlib
        plist_data = subprocess.check_output(
            ["ioreg", "-rd1", "-c", "IOPlatformExpertDevice", "-a"])
        return plistlib.loads(plist_data)[0]["IOPlatformSerialNumber"]
    else:
        return socket.gethostname()
Example No. 14
def getDefaultConfigObj():
    """
    Return a configobj instance with default values
    """
    from logging.handlers import DEFAULT_TCP_LOGGING_PORT
    from twisted.python.runtime import platform
    if platform.isMacOSX():
        logdir = os.path.join(os.environ["HOME"], "Library", "Logs", "Angel")
    else:
        logdir = os.path.join(os.environ["HOME"], ".angel-app", "log")

    #some defaults have to be computed first:
    defaults = {
        "angelhome":
        os.path.join(os.environ["HOME"], ".angel-app"),
        "repository":
        os.path.join(os.environ["HOME"], ".angel-app", "repository"),
        "keyring":
        os.path.join(os.environ["HOME"], ".angel-app", "keyring"),
        "angelshellinit":
        os.path.join(os.environ["HOME"], ".angel-app", "angelshellinit.py"),
        "logdir":
        logdir,
        "loglistenport":
        str(DEFAULT_TCP_LOGGING_PORT),
        "logformat":
        '%(asctime)s %(levelname)-6s %(name)-20s - %(message)s',
        "consolelogformat":
        '%(levelname)-6s %(name)-20s - %(message)s',
    }

    # create a string for the default config:
    defaultconfig_txt = """
    [common]
    # the root of all persistent state
    angelhome = "%(angelhome)s"
    # where we keep the actual file system data
    repository =  "%(repository)s"
    # For fancy setups, where the repository is not located on the same partition
    # as angelhome, you need to set up a separate tmp path for atomic file renames
    # repository-tmp =  /foo/bar/
    # where we keep key files
    keyring = "%(keyring)s"
    # logs go here
    logdir = "%(logdir)s"
    # remember at most this many clones of a resource
    maxclones = 5
    # one of DEBUG, INFO, WARN, ERROR, CRITICAL
    loglevel = INFO
    loglistenport = %(loglistenport)s
    logformat = '%(logformat)s'
    consolelogformat = '%(consolelogformat)s'
    # enable growling and the likes:
    desktopnotification = True
    # maximum download speed in kiB, 0 disables limit
    maxdownloadspeed_kib = 0
    # whether to use forking to optimize network connectivity:
    workerforking = True

    [presenter]
    # presenter provides privileged DAV support on localhost (=> Finder)
    enable = True
    listenPort = 6222
    listenInterface = localhost
    
    [provider]
    # provider provides read-only DAV access to remote clients
    enable = True
    listenPort = 6221
    # may use IPv6 for improved connectivity (teredo)
    useIPv6 = False
    
    [maintainer]
    # steps through resources, syncs with remote clones
    enable = True
    # sleep this long between resource inspection
    initialsleep = 1 # it's nice to be fast on the first traversal
    # don't sleep longer than this (in seconds) between resource inspections
    maxsleeptime = 3600
    # try to make a complete traversal take about this long 
    # (use a long time for low resource usage, a short one for tight synchronization)
    treetraversaltime = 86400 # we want a tree traversal to take about one day after the initial sync
    # the default name of this node with which it will advertise itself
    # to remote nodes. if you have a valid host name (DNS entry), use it here:
    nodename = unknown.invalid
    
    [gui]
    # start master process on gui startup
    autostartp2p = True
    # if it exists, this (python) script will be executed by the angelshell when you load it
    angelshellinit = '%(angelshellinit)s'

    [mounttab]
    """ % (defaults)

    cfg = ConfigObj(defaultconfig_txt.splitlines(),
                    configspec=_configspec_lines())
    cfg.interpolation = False
    assert isValidConfig(cfg) == True
    return cfg
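
A brief, hypothetical usage sketch; the section and key names come from the default config text above, and values read this way are plain strings unless the configspec is validated:

# Sketch: read defaults back out of the generated ConfigObj.
cfg = getDefaultConfigObj()
print(cfg['common']['maxclones'])         # '5'
print(cfg['maintainer']['maxsleeptime'])  # '3600'
print(cfg['provider']['listenPort'])      # '6221'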
Example No. 15
def isMacOSX():
    return platform.isMacOSX()
Example No. 16
class UNIXTestsBuilder(UNIXFamilyMixin, ReactorBuilder, ConnectionTestsMixin):
    """
    Builder defining tests relating to L{IReactorUNIX}.
    """
    requiredInterfaces = (IReactorUNIX, )

    endpoints = UNIXCreator()

    def test_mode(self):
        """
        The UNIX socket created by L{IReactorUNIX.listenUNIX} is created with
        the mode specified.
        """
        self._modeTest('listenUNIX', self.mktemp(), ServerFactory())

    def test_listenOnLinuxAbstractNamespace(self):
        """
        On Linux, a UNIX socket path may begin with C{'\0'} to indicate a socket
        in the abstract namespace.  L{IReactorUNIX.listenUNIX} accepts such a
        path.
        """
        # Don't listen on a path longer than the maximum allowed.
        path = _abstractPath(self)
        reactor = self.buildReactor()
        port = reactor.listenUNIX('\0' + path, ServerFactory())
        self.assertEqual(port.getHost(), UNIXAddress('\0' + path))

    if not platform.isLinux():
        test_listenOnLinuxAbstractNamespace.skip = (
            'Abstract namespace UNIX sockets only supported on Linux.')

    def test_listenFailure(self):
        """
        L{IReactorUNIX.listenUNIX} raises L{CannotListenError} if the
        underlying port's createInternetSocket raises a socket error.
        """
        def raiseSocketError(self):
            raise error('FakeBasePort forced socket.error')

        self.patch(base.BasePort, "createInternetSocket", raiseSocketError)
        reactor = self.buildReactor()
        with self.assertRaises(CannotListenError):
            reactor.listenUNIX('not-used', ServerFactory())

    def test_connectToLinuxAbstractNamespace(self):
        """
        L{IReactorUNIX.connectUNIX} also accepts a Linux abstract namespace
        path.
        """
        path = _abstractPath(self)
        reactor = self.buildReactor()
        connector = reactor.connectUNIX('\0' + path, ClientFactory())
        self.assertEqual(connector.getDestination(), UNIXAddress('\0' + path))

    if not platform.isLinux():
        test_connectToLinuxAbstractNamespace.skip = (
            'Abstract namespace UNIX sockets only supported on Linux.')

    def test_addresses(self):
        """
        A client's transport's C{getHost} and C{getPeer} return L{UNIXAddress}
        instances which have the filesystem path of the host and peer ends of
        the connection.
        """
        class SaveAddress(ConnectableProtocol):
            def makeConnection(self, transport):
                self.addresses = dict(host=transport.getHost(),
                                      peer=transport.getPeer())
                transport.loseConnection()

        server = SaveAddress()
        client = SaveAddress()

        runProtocolsWithReactor(self, server, client, self.endpoints)

        self.assertEqual(server.addresses['host'], client.addresses['peer'])
        self.assertEqual(server.addresses['peer'], client.addresses['host'])

    def test_sendFileDescriptor(self):
        """
        L{IUNIXTransport.sendFileDescriptor} accepts an integer file descriptor
        and sends a copy of it to the process reading from the connection.
        """
        from socket import fromfd

        s = socket()
        s.bind(('', 0))
        server = SendFileDescriptor(s.fileno(), b"junk")

        client = ReceiveFileDescriptor()
        d = client.waitForDescriptor()

        def checkDescriptor(descriptor):
            received = fromfd(descriptor, AF_INET, SOCK_STREAM)
            # Thanks for the free dup, fromfd()
            close(descriptor)

            # If the sockets have the same local address, they're probably the
            # same.
            self.assertEqual(s.getsockname(), received.getsockname())

            # But it would be cheating for them to be identified by the same
            # file descriptor.  The point was to get a copy, as we might get if
            # there were two processes involved here.
            self.assertNotEqual(s.fileno(), received.fileno())

        d.addCallback(checkDescriptor)
        d.addErrback(err, "Sending file descriptor encountered a problem")
        d.addBoth(lambda ignored: server.transport.loseConnection())

        runProtocolsWithReactor(self, server, client, self.endpoints)

    if sendmsgSkip is not None:
        test_sendFileDescriptor.skip = sendmsgSkip

    def test_sendFileDescriptorTriggersPauseProducing(self):
        """
        If a L{IUNIXTransport.sendFileDescriptor} call fills up the send buffer,
        any registered producer is paused.
        """
        class DoesNotRead(ConnectableProtocol):
            def connectionMade(self):
                self.transport.pauseProducing()

        class SendsManyFileDescriptors(ConnectableProtocol):
            paused = False

            def connectionMade(self):
                self.socket = socket()
                self.transport.registerProducer(self, True)

                def sender():
                    self.transport.sendFileDescriptor(self.socket.fileno())
                    self.transport.write(b"x")

                self.task = LoopingCall(sender)
                self.task.clock = self.transport.reactor
                self.task.start(0).addErrback(err, "Send loop failure")

            def stopProducing(self):
                self._disconnect()

            def resumeProducing(self):
                self._disconnect()

            def pauseProducing(self):
                self.paused = True
                self.transport.unregisterProducer()
                self._disconnect()

            def _disconnect(self):
                self.task.stop()
                self.transport.abortConnection()
                self.other.transport.abortConnection()

        server = SendsManyFileDescriptors()
        client = DoesNotRead()
        server.other = client
        runProtocolsWithReactor(self, server, client, self.endpoints)

        self.assertTrue(server.paused,
                        "sendFileDescriptor producer was not paused")

    if sendmsgSkip is not None:
        test_sendFileDescriptorTriggersPauseProducing.skip = sendmsgSkip

    def test_fileDescriptorOverrun(self):
        """
        If L{IUNIXTransport.sendFileDescriptor} is used to queue a greater
        number of file descriptors than the number of bytes sent using
        L{ITransport.write}, the connection is closed and the protocol connected
        to the transport has its C{connectionLost} method called with a failure
        wrapping L{FileDescriptorOverrun}.
        """
        cargo = socket()
        server = SendFileDescriptor(cargo.fileno(), None)

        client = ReceiveFileDescriptor()
        result = []
        d = client.waitForDescriptor()
        d.addBoth(result.append)
        d.addBoth(lambda ignored: server.transport.loseConnection())

        runProtocolsWithReactor(self, server, client, self.endpoints)

        self.assertIsInstance(result[0], Failure)
        result[0].trap(ConnectionClosed)
        self.assertIsInstance(server.reason.value, FileDescriptorOverrun)

    if sendmsgSkip is not None:
        test_fileDescriptorOverrun.skip = sendmsgSkip

    def _sendmsgMixinFileDescriptorReceivedDriver(self, ancillaryPacker):
        """
        Drive _SendmsgMixin via sendmsg socket calls to check that
        L{IFileDescriptorReceiver.fileDescriptorReceived} is called once
        for each file descriptor received in the ancillary messages.

        @param ancillaryPacker: A callable that will be given a list of
            two file descriptors and should return a two-tuple where:
            The first item is an iterable of zero or more (cmsg_level,
            cmsg_type, cmsg_data) tuples in the same order as the given
            list for actual sending via sendmsg; the second item is an
            integer indicating the expected number of FDs to be received.
        """
        # Strategy:
        # - Create a UNIX socketpair.
        # - Associate one end to a FakeReceiver and FakeProtocol.
        # - Call sendmsg on the other end to send FDs as ancillary data.
        #   Ancillary data is obtained calling ancillaryPacker with
        #   the two FDs associated to two temp files (using the socket
        #   FDs for this fails the device/inode verification tests on
        #   macOS 10.10, so temp files are used instead).
        # - Call doRead in the FakeReceiver.
        # - Verify results on FakeProtocol.
        #   Using known device/inodes to verify correct order.

        # TODO: replace FakeReceiver test approach with one based in
        # IReactorSocket.adoptStreamConnection once AF_UNIX support is
        # implemented; see https://twistedmatrix.com/trac/ticket/5573.

        from socket import socketpair
        from twisted.internet.unix import _SendmsgMixin
        from twisted.python.sendmsg import sendmsg

        def deviceInodeTuple(fd):
            fs = fstat(fd)
            return (fs.st_dev, fs.st_ino)

        @implementer(IFileDescriptorReceiver)
        class FakeProtocol(ConnectableProtocol):
            def __init__(self):
                self.fds = []
                self.deviceInodesReceived = []

            def fileDescriptorReceived(self, fd):
                self.fds.append(fd)
                self.deviceInodesReceived.append(deviceInodeTuple(fd))
                close(fd)

        class FakeReceiver(_SendmsgMixin):
            bufferSize = 1024

            def __init__(self, skt, proto):
                self.socket = skt
                self.protocol = proto

            def _dataReceived(self, data):
                pass

            def getHost(self):
                pass

            def getPeer(self):
                pass

            def _getLogPrefix(self, o):
                pass

        sendSocket, recvSocket = socketpair(AF_UNIX, SOCK_STREAM)
        self.addCleanup(sendSocket.close)
        self.addCleanup(recvSocket.close)

        proto = FakeProtocol()
        receiver = FakeReceiver(recvSocket, proto)

        # Temp files give us two FDs to send/receive/verify.
        fileOneFD, fileOneName = mkstemp()
        fileTwoFD, fileTwoName = mkstemp()
        self.addCleanup(unlink, fileOneName)
        self.addCleanup(unlink, fileTwoName)

        dataToSend = b'some data needs to be sent'
        fdsToSend = [fileOneFD, fileTwoFD]
        ancillary, expectedCount = ancillaryPacker(fdsToSend)
        sendmsg(sendSocket, dataToSend, ancillary)

        receiver.doRead()

        # Verify that fileDescriptorReceived was called twice.
        self.assertEqual(len(proto.fds), expectedCount)

        # Verify that received FDs are different from the sent ones.
        self.assertFalse(set(fdsToSend).intersection(set(proto.fds)))

        # Verify that FDs were received in the same order, if any.
        if proto.fds:
            deviceInodesSent = [deviceInodeTuple(fd) for fd in fdsToSend]
            self.assertEqual(deviceInodesSent, proto.deviceInodesReceived)

    def test_multiFileDescriptorReceivedPerRecvmsgOneCMSG(self):
        """
        _SendmsgMixin handles multiple file descriptors per recvmsg, calling
        L{IFileDescriptorReceiver.fileDescriptorReceived} once per received
        file descriptor. Scenario: single CMSG with two FDs.
        """
        from twisted.python.sendmsg import SCM_RIGHTS

        def ancillaryPacker(fdsToSend):
            ancillary = [(SOL_SOCKET, SCM_RIGHTS, pack('ii', *fdsToSend))]
            expectedCount = 2
            return ancillary, expectedCount

        self._sendmsgMixinFileDescriptorReceivedDriver(ancillaryPacker)

    if sendmsgSkip is not None:
        test_multiFileDescriptorReceivedPerRecvmsgOneCMSG.skip = sendmsgSkip

    def test_multiFileDescriptorReceivedPerRecvmsgTwoCMSGs(self):
        """
        _SendmsgMixin handles multiple file descriptors per recvmsg, calling
        L{IFileDescriptorReceiver.fileDescriptorReceived} once per received
        file descriptor. Scenario: two CMSGs with one FD each.
        """
        from twisted.python.sendmsg import SCM_RIGHTS

        def ancillaryPacker(fdsToSend):
            ancillary = [(SOL_SOCKET, SCM_RIGHTS, pack('i', fd))
                         for fd in fdsToSend]
            expectedCount = 2
            return ancillary, expectedCount

        self._sendmsgMixinFileDescriptorReceivedDriver(ancillaryPacker)

    if platform.isMacOSX():
        test_multiFileDescriptorReceivedPerRecvmsgTwoCMSGs.skip = (
            "Multi control message ancillary sendmsg not supported on Mac.")
    elif sendmsgSkip is not None:
        test_multiFileDescriptorReceivedPerRecvmsgTwoCMSGs.skip = sendmsgSkip

    def test_multiFileDescriptorReceivedPerRecvmsgBadCMSG(self):
        """
        _SendmsgMixin handles multiple file descriptors per recvmsg, calling
        L{IFileDescriptorReceiver.fileDescriptorReceived} once per received
        file descriptor. Scenario: unsupported CMSGs.
        """
        # Given that we can't just send random/invalid ancillary data via the
        # packer for it to be sent via sendmsg -- the kernel would not accept
        # it -- we'll temporarily replace recvmsg with a fake one that produces
        # a non-supported ancillary message level/type. This being said, from
        # the perspective of the ancillaryPacker, all that is required is to
        # let the test driver know that 0 file descriptors are expected.
        from twisted.python import sendmsg

        def ancillaryPacker(fdsToSend):
            ancillary = []
            expectedCount = 0
            return ancillary, expectedCount

        def fakeRecvmsgUnsupportedAncillary(skt, *args, **kwargs):
            data = b'some data'
            ancillary = [(None, None, b'')]
            flags = 0
            return sendmsg.RecievedMessage(data, ancillary, flags)

        events = []
        addObserver(events.append)
        self.addCleanup(removeObserver, events.append)

        self.patch(sendmsg, "recvmsg", fakeRecvmsgUnsupportedAncillary)
        self._sendmsgMixinFileDescriptorReceivedDriver(ancillaryPacker)

        # Verify the expected message was logged.
        expectedMessage = 'received unsupported ancillary data'
        found = any(expectedMessage in e['format'] for e in events)
        self.assertTrue(found, 'Expected message not found in logged events')

    if sendmsgSkip is not None:
        test_multiFileDescriptorReceivedPerRecvmsgBadCMSG.skip = sendmsgSkip

    def test_avoidLeakingFileDescriptors(self):
        """
        If associated with a protocol which does not provide
        L{IFileDescriptorReceiver}, file descriptors received by the
        L{IUNIXTransport} implementation are closed and a warning is emitted.
        """
        # To verify this, establish a connection.  Send one end of the
        # connection over the IUNIXTransport implementation.  After the copy
        # should no longer exist, close the original.  If the opposite end of
        # the connection decides the connection is closed, the copy does not
        # exist.
        from socket import socketpair
        probeClient, probeServer = socketpair()

        events = []
        addObserver(events.append)
        self.addCleanup(removeObserver, events.append)

        class RecordEndpointAddresses(SendFileDescriptor):
            def connectionMade(self):
                self.hostAddress = self.transport.getHost()
                self.peerAddress = self.transport.getPeer()
                SendFileDescriptor.connectionMade(self)

        server = RecordEndpointAddresses(probeClient.fileno(), b"junk")
        client = ConnectableProtocol()

        runProtocolsWithReactor(self, server, client, self.endpoints)

        # Get rid of the original reference to the socket.
        probeClient.close()

        # A non-blocking recv will return "" if the connection is closed, as
        # desired.  If the connection has not been closed, because the
        # duplicate file descriptor is still open, it will fail with EAGAIN
        # instead.
        probeServer.setblocking(False)
        self.assertEqual(b"", probeServer.recv(1024))

        # This is a surprising circumstance, so it should be logged.
        format = ("%(protocolName)s (on %(hostAddress)r) does not "
                  "provide IFileDescriptorReceiver; closing file "
                  "descriptor received (from %(peerAddress)r).")
        clsName = "ConnectableProtocol"

        # Reverse host and peer, since the log event is from the client
        # perspective.
        expectedEvent = dict(hostAddress=server.peerAddress,
                             peerAddress=server.hostAddress,
                             protocolName=clsName,
                             format=format)

        for logEvent in events:
            for k, v in iteritems(expectedEvent):
                if v != logEvent.get(k):
                    break
            else:
                # No mismatches were found, stop looking at events
                break
        else:
            # No fully matching events were found, fail the test.
            self.fail("Expected event (%s) not found in logged events (%s)" %
                      (expectedEvent, pformat(events, )))

    if sendmsgSkip is not None:
        test_avoidLeakingFileDescriptors.skip = sendmsgSkip

    def test_descriptorDeliveredBeforeBytes(self):
        """
        L{IUNIXTransport.sendFileDescriptor} sends file descriptors before
        L{ITransport.write} sends normal bytes.
        """
        @implementer(IFileDescriptorReceiver)
        class RecordEvents(ConnectableProtocol):
            def connectionMade(self):
                ConnectableProtocol.connectionMade(self)
                self.events = []

            def fileDescriptorReceived(innerSelf, descriptor):
                self.addCleanup(close, descriptor)
                innerSelf.events.append(type(descriptor))

            def dataReceived(self, data):
                self.events.extend(data)

        cargo = socket()
        server = SendFileDescriptor(cargo.fileno(), b"junk")
        client = RecordEvents()

        runProtocolsWithReactor(self, server, client, self.endpoints)

        self.assertEqual(int, client.events[0])
        if _PY3:
            self.assertEqual(b"junk", bytes(client.events[1:]))
        else:
            self.assertEqual(b"junk", b"".join(client.events[1:]))

    if sendmsgSkip is not None:
        test_descriptorDeliveredBeforeBytes.skip = sendmsgSkip
Example No. 17
def getDefaultConfigObj():
    """
    Return a configobj instance with default values
    """
    from logging.handlers import DEFAULT_TCP_LOGGING_PORT
    from twisted.python.runtime import platform
    if platform.isMacOSX():
        logdir = os.path.join(os.environ["HOME"], "Library", "Logs", "Angel")
    else:
        logdir = os.path.join(os.environ["HOME"], ".angel-app", "log")
        
    #some defaults have to be computed first:
    defaults = {   
            "angelhome" : os.path.join(os.environ["HOME"], ".angel-app"),
            "repository" : os.path.join(os.environ["HOME"], ".angel-app", "repository"),
            "keyring" : os.path.join(os.environ["HOME"], ".angel-app", "keyring"),
            "angelshellinit" : os.path.join(os.environ["HOME"], ".angel-app", "angelshellinit.py"),
            "logdir" : logdir,
            "loglistenport" : str(DEFAULT_TCP_LOGGING_PORT),
            "logformat" : '%(asctime)s %(levelname)-6s %(name)-20s - %(message)s',
            "consolelogformat" : '%(levelname)-6s %(name)-20s - %(message)s',
                }
    
    # create a string for the default config:
    defaultconfig_txt = """
    [common]
    # the root of all persistent state
    angelhome = "%(angelhome)s"
    # where we keep the actual file system data
    repository =  "%(repository)s"
    # For fancy setups, where the repository is not located on the same partition
    # as angelhome, you need to set up a separate tmp path for atomic file renames
    # repository-tmp =  /foo/bar/
    # where we keep key files
    keyring = "%(keyring)s"
    # logs go here
    logdir = "%(logdir)s"
    # remember at most this many clones of a resource
    maxclones = 5
    # one of DEBUG, INFO, WARN, ERROR, CRITICAL
    loglevel = INFO
    loglistenport = %(loglistenport)s
    logformat = '%(logformat)s'
    consolelogformat = '%(consolelogformat)s'
    # enable growling and the likes:
    desktopnotification = True
    # maximum download speed in kiB, 0 disables limit
    maxdownloadspeed_kib = 0
    # whether to use forking to optimize network connectivity:
    workerforking = True

    [presenter]
    # presenter provides privileged DAV support on localhost (=> Finder)
    enable = True
    listenPort = 6222
    listenInterface = localhost
    
    [provider]
    # provider provides read-only DAV access to remote clients
    enable = True
    listenPort = 6221
    # may use IPv6 for improved connectivity (teredo)
    useIPv6 = False
    
    [maintainer]
    # steps through resources, syncs with remote clones
    enable = True
    # sleep this long between resource inspection
    initialsleep = 1 # it's nice to be fast on the first traversal
    # don't sleep longer than this (in seconds) between resource inspections
    maxsleeptime = 3600
    # try to make a complete traversal take about this long 
    # (use a long time for low resource usage, a short one for tight synchronization)
    treetraversaltime = 86400 # we want a tree traversal to take about one day after the initial sync
    # the default name of this node with which it will advertise itself
    # to remote nodes. if you have a valid host name (DNS entry), use it here:
    nodename = unknown.invalid
    
    [gui]
    # start master process on gui startup
    autostartp2p = True
    # if it exists, this (python) script will be executed by the angelshell when you load it
    angelshellinit = '%(angelshellinit)s'

    [mounttab]
    """ % ( defaults )

    cfg = ConfigObj(defaultconfig_txt.splitlines(), configspec = _configspec_lines())
    cfg.interpolation = False
    assert isValidConfig(cfg) == True
    return cfg
Example No. 18
'''

import sys, os
# from twisted.internet import default
# default.install()
from twisted.python.runtime import platform

# print(os.name, '000000000000')                      # posix
if os.name != 'nt':
    try:
        if platform.isLinux():
            try:
                from twisted.internet.epollreactor import install
            except ImportError:
                from twisted.internet.pollreactor import install
        elif platform.getType() == 'posix' and not platform.isMacOSX():
            from twisted.internet.pollreactor import install
        else:
            from twisted.internet.selectreactor import install
    except ImportError:
        from twisted.internet.selectreactor import install
    install()

from twisted.internet import reactor
from twisted.python import log
from learn_twist.utils import services
from learn_twist.netconnect.protoc import LiberateFactory

reactor = reactor
service = services.CommandService("loginService",
                                  run_style=services.Service.PARALLEL_STYLE)
Example No. 19
def isMacOSX():
    return platform.isMacOSX()
Example No. 20
import os
import socket
import uuid
import time

from twisted.protocols import basic, policies
from twisted.python.runtime import platform

DATA_FOLDER = os.path.abspath(
    os.path.join(os.path.dirname( __file__), '_data/'))

# Errors
BAD_COMMAND_CODE = 500
DEFAULT_BAD_COMMAND_MSG = u"""Неверный синтаксис команды"""

if platform.isMacOSX():
    DNSNAME = socket.gethostname()
else:
    DNSNAME = socket.getfqdn()


class ProtocolError(Exception):
    """
    Exception raised for malformed commands.
    """


class TestedProtocol(basic.LineReceiver, policies.TimeoutMixin):
    """
    Test protocol.
    """
Example No. 21
    def wrapService(cls, path, service, store, uid=None, gid=None,
                    parallel=0, spawner=None, merge=False):
        """
        Create an L{UpgradeToDatabaseService} if there are still file-based
        calendar or addressbook homes remaining in the given path.

        @param path: a path pointing at the document root, where the file-based
            data-store is located.
        @type path: L{CachingFilePath}

        @param service: the service to wrap.  This service should be started
            when the upgrade is complete.  (This is accomplished by returning
            it directly when no upgrade needs to be done, and by adding it to
            the service hierarchy when the upgrade completes; assuming that the
            service parent of the resulting service will be set to a
            L{MultiService} or similar.)

        @param store: the SQL storage service.

        @type service: L{IService}

        @param parallel: The number of parallel subprocesses that should manage
            the upgrade.

        @param spawner: a concrete L{StoreSpawnerService} subclass that will be
            used to spawn helper processes.

        @param merge: merge filesystem homes into SQL homes, rather than
            skipping them.

        @return: a service
        @rtype: L{IService}
        """
        # TODO: TOPPATHS should be computed based on enabled flags in 'store',
        # not hard coded.
        for homeType in TOPPATHS:
            if path.child(homeType).exists():
                if platform.isMacOSX():
                    appropriateStoreClass = XattrPropertyStore
                else:
                    attrs = xattr.xattr(path.path)
                    try:
                        attrs.get('user.should-not-be-set')
                    except IOError, ioe:
                        if ioe.errno == errno.ENODATA:
                            # xattrs are supported and enabled on the filesystem
                            # where the calendar data lives.  this takes some
                            # doing (you have to edit fstab), so this means
                            # we're trying to migrate some 2.x data from a
                            # previous linux installation.
                            appropriateStoreClass = XattrPropertyStore
                        elif ioe.errno == errno.EOPNOTSUPP:
                            # The operation wasn't supported.  This is what will
                            # usually happen on a naively configured filesystem,
                            # so this means we're most likely trying to migrate
                            # some data from an untarred archive created on an
                            # OS X installation using xattrs.
                            appropriateStoreClass = AppleDoubleStore
                        else:
                            # No need to check for ENOENT and the like; we just
                            # checked above to make sure the parent exists.
                            # Other errors are not anticipated here, so fail
                            # fast.
                            raise

                    else:
                        # No IOError from the probe; fall back to the
                        # AppleDouble-based store.
                        appropriateStoreClass = AppleDoubleStore

                self = cls(
                    FileStore(path, None, True, True,
                              propertyStoreClass=appropriateStoreClass),
                    store, service, uid=uid, gid=gid,
                    parallel=parallel, spawner=spawner, merge=merge
                )
                return self
Example No. 22
class ReactorBuilder:
    """
    L{SynchronousTestCase} mixin which provides a reactor-creation API.  This
    mixin defines C{setUp} and C{tearDown}, so mix it in before
    L{SynchronousTestCase} or call its methods from the overridden ones in the
    subclass.

    @cvar skippedReactors: A dict mapping FQPN strings of reactors for
        which the tests defined by this class will be skipped to strings
        giving the skip message.
    @cvar requiredInterfaces: A C{list} of interfaces which the reactor must
        provide or these tests will be skipped.  The default, C{None}, means
        that no interfaces are required.
    @ivar reactorFactory: A no-argument callable which returns the reactor to
        use for testing.
    @ivar originalHandler: The SIGCHLD handler which was installed when setUp
        ran and which will be re-installed when tearDown runs.
    @ivar _reactors: A list of FQPN strings giving the reactors for which
        L{SynchronousTestCase}s will be created.
    """

    _reactors = [
        # Select works everywhere
        "twisted.internet.selectreactor.SelectReactor",
    ]

    if platform.isWindows():
        # PortableGtkReactor is only really interesting on Windows,
        # but not really Windows specific; if you want you can
        # temporarily move this up to the all-platforms list to test
        # it on other platforms.  It's not there in general because
        # it's not _really_ worth it to support on other platforms,
        # since no one really wants to use it on other platforms.
        _reactors.extend([
            "twisted.internet.gtk2reactor.PortableGtkReactor",
            "twisted.internet.gireactor.PortableGIReactor",
            "twisted.internet.gtk3reactor.PortableGtk3Reactor",
            "twisted.internet.win32eventreactor.Win32Reactor",
            "twisted.internet.iocpreactor.reactor.IOCPReactor"
        ])
    else:
        _reactors.extend([
            "twisted.internet.glib2reactor.Glib2Reactor",
            "twisted.internet.gtk2reactor.Gtk2Reactor",
            "twisted.internet.gireactor.GIReactor",
            "twisted.internet.gtk3reactor.Gtk3Reactor"
        ])
        if platform.isMacOSX():
            _reactors.append("twisted.internet.cfreactor.CFReactor")
        else:
            _reactors.extend([
                "twisted.internet.pollreactor.PollReactor",
                "twisted.internet.epollreactor.EPollReactor"
            ])
            if not platform.isLinux():
                # Presumably Linux is not going to start supporting kqueue, so
                # skip even trying this configuration.
                _reactors.extend([
                    # Support KQueue on non-OS-X POSIX platforms for now.
                    "twisted.internet.kqreactor.KQueueReactor",
                ])

    reactorFactory = None
    originalHandler = None
    requiredInterfaces = None
    skippedReactors = {}

    def setUp(self):
        """
        Clear the SIGCHLD handler, if there is one, to ensure an environment
        like the one which exists prior to a call to L{reactor.run}.
        """
        if not platform.isWindows():
            self.originalHandler = signal.signal(signal.SIGCHLD,
                                                 signal.SIG_DFL)

    def tearDown(self):
        """
        Restore the original SIGCHLD handler and reap processes as long as
        there seem to be any remaining.
        """
        if self.originalHandler is not None:
            signal.signal(signal.SIGCHLD, self.originalHandler)
        if process is not None:
            begin = time.time()
            while process.reapProcessHandlers:
                log.msg("ReactorBuilder.tearDown reaping some processes %r" %
                        (process.reapProcessHandlers, ))
                process.reapAllProcesses()

                # The process should exit on its own.  However, if it
                # doesn't, we're stuck in this loop forever.  To avoid
                # hanging the test suite, eventually give the process some
                # help exiting and move on.
                time.sleep(0.001)
                if time.time() - begin > 60:
                    for pid in process.reapProcessHandlers:
                        os.kill(pid, signal.SIGKILL)
                    raise Exception(
                        "Timeout waiting for child processes to exit: %r" %
                        (process.reapProcessHandlers, ))

    def unbuildReactor(self, reactor):
        """
        Clean up any resources which may have been allocated for the given
        reactor by its creation or by a test which used it.
        """
        # Chris says:
        #
        # XXX These explicit calls to clean up the waker (and any other
        # internal readers) should become obsolete when bug #3063 is
        # fixed. -radix, 2008-02-29. Fortunately it should probably cause an
        # error when bug #3063 is fixed, so it should be removed in the same
        # branch that fixes it.
        #
        # -exarkun
        reactor._uninstallHandler()
        if getattr(reactor, '_internalReaders', None) is not None:
            for reader in reactor._internalReaders:
                reactor.removeReader(reader)
                reader.connectionLost(None)
            reactor._internalReaders.clear()

        # Here's an extra thing unrelated to wakers but necessary for
        # cleaning up after the reactors we make.  -exarkun
        reactor.disconnectAll()

        # It would also be bad if any timed calls left over were allowed to
        # run.
        calls = reactor.getDelayedCalls()
        for c in calls:
            c.cancel()

    def buildReactor(self):
        """
        Create and return a reactor using C{self.reactorFactory}.
        """
        try:
            from twisted.internet.cfreactor import CFReactor
            from twisted.internet import reactor as globalReactor
        except ImportError:
            pass
        else:
            if (isinstance(globalReactor, CFReactor)
                    and self.reactorFactory is CFReactor):
                raise SkipTest(
                    "CFReactor uses APIs which manipulate global state, "
                    "so it's not safe to run its own reactor-builder tests "
                    "under itself")
        try:
            reactor = self.reactorFactory()
        except:
            # Unfortunately, not all errors which result in a reactor
            # being unusable are detectable without actually
            # instantiating the reactor.  So we catch some more here
            # and skip the test if necessary.  We also log it to aid
            # with debugging, but flush the logged error so the test
            # doesn't fail.
            log.err(None, "Failed to install reactor")
            self.flushLoggedErrors()
            raise SkipTest(Failure().getErrorMessage())
        else:
            if self.requiredInterfaces is not None:
                missing = [
                    required for required in self.requiredInterfaces
                    if not required.providedBy(reactor)
                ]
                if missing:
                    self.unbuildReactor(reactor)
                    raise SkipTest(
                        "%s does not provide %s" %
                        (fullyQualifiedName(reactor.__class__), ",".join(
                            [fullyQualifiedName(x) for x in missing])))
        self.addCleanup(self.unbuildReactor, reactor)
        return reactor

    def getTimeout(self):
        """
        Determine how long to run the test before considering it failed.

        @return: A C{int} or C{float} giving a number of seconds.
        """
        return acquireAttribute(self._parents, 'timeout',
                                DEFAULT_TIMEOUT_DURATION)

    def runReactor(self, reactor, timeout=None):
        """
        Run the reactor for at most the given amount of time.

        @param reactor: The reactor to run.

        @type timeout: C{int} or C{float}
        @param timeout: The maximum amount of time, specified in seconds, to
            allow the reactor to run.  If the reactor is still running after
            this much time has elapsed, it will be stopped and an exception
            raised.  If C{None}, the default test method timeout imposed by
            Trial will be used.  This depends on the L{IReactorTime}
            implementation of C{reactor} for correct operation.

        @raise TestTimeoutError: If the reactor is still running after
            C{timeout} seconds.
        """
        if timeout is None:
            timeout = self.getTimeout()

        timedOut = []

        def stop():
            timedOut.append(None)
            reactor.stop()

        timedOutCall = reactor.callLater(timeout, stop)
        reactor.run()
        if timedOut:
            raise TestTimeoutError("reactor still running after %s seconds" %
                                   (timeout, ))
        else:
            timedOutCall.cancel()

    def makeTestCaseClasses(cls):
        """
        Create a L{SynchronousTestCase} subclass which mixes in C{cls} for each
        known reactor and return a dict mapping their names to them.
        """
        classes = {}
        for reactor in cls._reactors:
            shortReactorName = reactor.split(".")[-1]
            name = (cls.__name__ + "." + shortReactorName).replace(".", "_")

            class testcase(cls, SynchronousTestCase):
                __module__ = cls.__module__
                if reactor in cls.skippedReactors:
                    skip = cls.skippedReactors[reactor]
                try:
                    reactorFactory = namedAny(reactor)
                except:
                    skip = Failure().getErrorMessage()

            testcase.__name__ = name
            classes[testcase.__name__] = testcase
        return classes

    makeTestCaseClasses = classmethod(makeTestCaseClasses)
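

# A usage sketch, not from the original module: the builder name below is
# hypothetical, and merging the generated classes into the module namespace
# with globals().update() is one common way to let trial discover one
# concrete TestCase per known reactor from the dict that
# makeTestCaseClasses() returns.


class ExampleTestsBuilder(ReactorBuilder):
    """
    Hypothetical builder used only to illustrate C{makeTestCaseClasses}.
    """

    def test_stops(self):
        reactor = self.buildReactor()
        reactor.callWhenRunning(reactor.stop)
        self.runReactor(reactor)


globals().update(ExampleTestsBuilder.makeTestCaseClasses())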
Example no. 23
0
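# Illustrative sketch, not from the original module: the tests below rely on
# _ShutdownCallbackProcessProtocol, which is defined elsewhere in the module.
# The hypothetically named class below only sketches the shape those tests
# assume: child output is collected per file descriptor in C{received}, and
# the Deferred passed to __init__ is fired from processEnded once the child
# has fully exited.


class _ShutdownCallbackProcessProtocolSketch(ProcessProtocol):
    def __init__(self, whenEnded):
        self.whenEnded = whenEnded
        self.received = {}

    def childDataReceived(self, fd, data):
        # Accumulate whatever the child writes, keyed by file descriptor.
        self.received.setdefault(fd, []).append(data)

    def processEnded(self, reason):
        # Signal the waiting test that the child has fully exited.
        self.whenEnded.callback(None)

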
class ProcessTestsBuilderBase(ReactorBuilder):
    """
    Base class for L{IReactorProcess} tests which defines some tests which
    can be applied to PTY or non-PTY uses of C{spawnProcess}.

    Subclasses are expected to set the C{usePTY} attribute to C{True} or
    C{False}.
    """

    requiredInterfaces = [IReactorProcess]

    def test_processTransportInterface(self):
        """
        L{IReactorProcess.spawnProcess} connects the protocol passed to it
        to a transport which provides L{IProcessTransport}.
        """
        ended = Deferred()
        protocol = _ShutdownCallbackProcessProtocol(ended)

        reactor = self.buildReactor()
        transport = reactor.spawnProcess(
            protocol, pyExe, [pyExe, b"-c", b""], usePTY=self.usePTY
        )

        # The transport is available synchronously, so we can check it right
        # away (unlike many transport-based tests).  This is convenient even
        # though it's probably not how the spawnProcess interface should really
        # work.
        # We're not using verifyObject here because part of
        # IProcessTransport is a lie - there are no getHost or getPeer
        # methods.  See #1124.
        self.assertTrue(IProcessTransport.providedBy(transport))

        # Let the process run and exit so we don't leave a zombie around.
        ended.addCallback(lambda ignored: reactor.stop())
        self.runReactor(reactor)

    def _writeTest(self, write):
        """
        Helper for testing L{IProcessTransport} write functionality.  This
        method spawns a child process and gives C{write} a chance to write some
        bytes to it.  It then verifies that the bytes were actually written to
        it (by relying on the child process to echo them back).

        @param write: A two-argument callable.  This is invoked with a process
            transport and some bytes to write to it.
        """
        reactor = self.buildReactor()

        ended = Deferred()
        protocol = _ShutdownCallbackProcessProtocol(ended)

        bytesToSend = b"hello, world" + networkString(os.linesep)
        program = b"import sys\n" b"sys.stdout.write(sys.stdin.readline())\n"

        def startup():
            transport = reactor.spawnProcess(protocol, pyExe, [pyExe, b"-c", program])
            try:
                write(transport, bytesToSend)
            except BaseException:
                err(None, "Unhandled exception while writing")
                transport.signalProcess("KILL")

        reactor.callWhenRunning(startup)

        ended.addCallback(lambda ignored: reactor.stop())

        self.runReactor(reactor)
        self.assertEqual(bytesToSend, b"".join(protocol.received[1]))

    def test_write(self):
        """
        L{IProcessTransport.write} writes the specified C{bytes} to the standard
        input of the child process.
        """

        def write(transport, bytesToSend):
            transport.write(bytesToSend)

        self._writeTest(write)

    def test_writeSequence(self):
        """
        L{IProcessTransport.writeSequence} writes the specified C{list} of
        C{bytes} to the standard input of the child process.
        """

        def write(transport, bytesToSend):
            transport.writeSequence([bytesToSend])

        self._writeTest(write)

    def test_writeToChild(self):
        """
        L{IProcessTransport.writeToChild} writes the specified C{bytes} to the
        specified file descriptor of the child process.
        """

        def write(transport, bytesToSend):
            transport.writeToChild(0, bytesToSend)

        self._writeTest(write)

    def test_writeToChildBadFileDescriptor(self):
        """
        L{IProcessTransport.writeToChild} raises L{KeyError} if passed a file
        descriptor which was not set up by L{IReactorProcess.spawnProcess}.
        """

        def write(transport, bytesToSend):
            try:
                self.assertRaises(KeyError, transport.writeToChild, 13, bytesToSend)
            finally:
                # Just get the process to exit so the test can complete
                transport.write(bytesToSend)

        self._writeTest(write)

    @skipIf(
        getattr(signal, "SIGCHLD", None) is None,
        "Platform lacks SIGCHLD, early-spawnProcess test can't work.",
    )
    def test_spawnProcessEarlyIsReaped(self):
        """
        If, before the reactor is started with L{IReactorCore.run}, a
        process is started with L{IReactorProcess.spawnProcess} and
        terminates, the process is reaped once the reactor is started.
        """
        reactor = self.buildReactor()

        # Create the process with no shared file descriptors, so that there
        # are no other events for the reactor to notice and "cheat" with.
        # We want to be sure it's really dealing with the process exiting,
        # not some associated event.
        if self.usePTY:
            childFDs = None
        else:
            childFDs = {}

        # Arrange to notice the SIGCHLD.
        signaled = threading.Event()

        def handler(*args):
            signaled.set()

        signal.signal(signal.SIGCHLD, handler)

        # Start a process - before starting the reactor!
        ended = Deferred()
        reactor.spawnProcess(
            _ShutdownCallbackProcessProtocol(ended),
            pyExe,
            [pyExe, b"-c", b""],
            usePTY=self.usePTY,
            childFDs=childFDs,
        )

        # Wait for the SIGCHLD (which might have been delivered before we got
        # here, but that's okay because the signal handler was installed above,
        # before we could have gotten it).
        signaled.wait(120)
        if not signaled.is_set():
            self.fail("Timed out waiting for child process to exit.")

        # Capture the processEnded callback.
        result = []
        ended.addCallback(result.append)

        if result:
            # The synchronous path through spawnProcess / Process.__init__ /
            # registerReapProcessHandler was encountered.  There's no reason to
            # start the reactor, because everything is done already.
            return

        # Otherwise, though, start the reactor so it can tell us the process
        # exited.
        ended.addCallback(lambda ignored: reactor.stop())
        self.runReactor(reactor)

        # Make sure the reactor stopped because the Deferred fired.
        self.assertTrue(result)
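
    # Illustrative sketch (hypothetical helper, not used by the test above):
    # on POSIX, "reaping" a child means collecting its exit status with
    # waitpid(2); passing os.WNOHANG makes the call non-blocking and it
    # returns a pid of 0 while the child is still running.
    def _exampleReap(self, pid):
        reapedPid, status = os.waitpid(pid, os.WNOHANG)
        if reapedPid == 0:
            # The child has not exited yet, so nothing was reaped.
            return None
        return status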

    def test_processExitedWithSignal(self):
        """
        The C{reason} argument passed to L{IProcessProtocol.processExited} is a
        L{ProcessTerminated} instance if the child process exits with a signal.
        """
        sigName = "TERM"
        sigNum = getattr(signal, "SIG" + sigName)
        exited = Deferred()
        source = (
            b"import sys\n"
            # Talk so the parent process knows the process is running.  This is
            # necessary because ProcessProtocol.makeConnection may be called
            # before this process is exec'd.  It would be unfortunate if we
            # SIGTERM'd the Twisted process while it was on its way to doing
            # the exec.
            b"sys.stdout.write('x')\n"
            b"sys.stdout.flush()\n"
            b"sys.stdin.read()\n"
        )

        class Exiter(ProcessProtocol):
            def childDataReceived(self, fd, data):
                msg("childDataReceived(%d, %r)" % (fd, data))
                self.transport.signalProcess(sigName)

            def childConnectionLost(self, fd):
                msg("childConnectionLost(%d)" % (fd,))

            def processExited(self, reason):
                msg(f"processExited({reason!r})")
                # Protect the Deferred from the failure so that it follows
                # the callback chain.  This doesn't use the errback chain
                # because it wants to make sure reason is a Failure.  An
                # Exception would also make an errback-based test pass, and
                # that would be wrong.
                exited.callback([reason])

            def processEnded(self, reason):
                msg(f"processEnded({reason!r})")

        reactor = self.buildReactor()
        reactor.callWhenRunning(
            reactor.spawnProcess,
            Exiter(),
            pyExe,
            [pyExe, b"-c", source],
            usePTY=self.usePTY,
        )

        def cbExited(args):
            (failure,) = args
            # Trapping implicitly verifies that it's a Failure (rather than
            # an exception) and explicitly makes sure it's the right type.
            failure.trap(ProcessTerminated)
            err = failure.value
            if platform.isWindows():
                # Windows can't really /have/ signals, so it certainly can't
                # report them as the reason for termination.  Maybe there's
                # something better we could be doing here, anyway?  Hard to
                # say.  Anyway, this inconsistency between different platforms
                # is extremely unfortunate and I would remove it if I
                # could. -exarkun
                self.assertIsNone(err.signal)
                self.assertEqual(err.exitCode, 1)
            else:
                self.assertEqual(err.signal, sigNum)
                self.assertIsNone(err.exitCode)

        exited.addCallback(cbExited)
        exited.addErrback(err)
        exited.addCallback(lambda ign: reactor.stop())

        self.runReactor(reactor)
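
    # Illustrative sketch (hypothetical helper, not used by the tests above):
    # the usual way application code distinguishes a clean exit from a
    # signal-terminated child when handed the reason Failure that
    # processEnded/processExited receive.
    def _describeTermination(self, reason):
        # Local import keeps the sketch self-contained; ProcessDone lives in
        # twisted.internet.error alongside ProcessTerminated.
        from twisted.internet.error import ProcessDone

        if reason.check(ProcessDone):
            return "clean exit"
        reason.trap(ProcessTerminated)
        if reason.value.signal is not None:
            return "killed by signal %d" % (reason.value.signal,)
        return "exited with status %d" % (reason.value.exitCode,)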

    def test_systemCallUninterruptedByChildExit(self):
        """
        If a child process exits while a system call is in progress, the system
        call should not be interfered with.  In particular, it should not fail
        with EINTR.

        Older versions of Twisted installed a SIGCHLD handler on POSIX without
        using the feature exposed by the SA_RESTART flag to sigaction(2).  The
        most noticeable problem this caused was for blocking reads and writes to
        sometimes fail with EINTR.
        """
        reactor = self.buildReactor()
        result = []

        def f():
            try:
                exe = pyExe.decode(sys.getfilesystemencoding())

                subprocess.Popen([exe, "-c", "import time; time.sleep(0.1)"])
                f2 = subprocess.Popen(
                    [exe, "-c", ("import time; time.sleep(0.5);" "print('Foo')")],
                    stdout=subprocess.PIPE,
                )
                # The read call below will blow up with an EINTR from the
                # SIGCHLD from the first process exiting if we install a
                # SIGCHLD handler without SA_RESTART.  (which we used to do)
                with f2.stdout:
                    result.append(f2.stdout.read())
            finally:
                reactor.stop()

        reactor.callWhenRunning(f)
        self.runReactor(reactor)
        self.assertEqual(result, [b"Foo" + os.linesep.encode("ascii")])

    @skipIf(platform.isWindows(), "Test only applies to POSIX platforms.")
    # If you see this comment and are running on macOS, check whether this
    # test passes in your environment.  It runs on Linux (locally and in CI)
    # and on local macOS development environments, but is skipped on macOS CI.
    # This pattern is for POSIX tests that are expected to pass on macOS but
    # currently fail there for lack of macOS developers; running them locally
    # helps macOS developers discover and fix the issue.
    @skipIf(
        platform.isMacOSX() and os.environ.get("CI", "").lower() == "true",
        "Skipped on macOS CI env.",
    )
    def test_openFileDescriptors(self):
        """
        Processes spawned with spawnProcess() close all extraneous file
        descriptors in the parent.  They do have a stdin, stdout, and stderr
        open.
        """

        # To test this, we are going to open a file descriptor in the parent
        # that is unlikely to be opened in the child, then verify that it's not
        # open in the child.
        source = networkString(
            """
import sys
sys.path.insert(0, '{}')
from twisted.internet import process
sys.stdout.write(repr(process._listOpenFDs()))
sys.stdout.flush()""".format(
                twistedRoot.path
            )
        )

        r, w = os.pipe()
        self.addCleanup(os.close, r)
        self.addCleanup(os.close, w)

        # The call to "os.listdir()" (in _listOpenFDs's implementation) opens a
        # file descriptor (with "opendir"), which shows up in _listOpenFDs's
        # result.  And speaking of "random" file descriptors, the code required
        # for _listOpenFDs itself imports logger, which imports random, which
        # (depending on your Python version) might leave /dev/urandom open.

        # More generally though, even if we were to use an extremely minimal C
        # program, the operating system would be within its rights to open file
        # descriptors we might not know about in the C library's
        # initialization; things like debuggers, profilers, or nsswitch plugins
        # might open some and this test should pass in those environments.

        # Although some of these file descriptors aren't predictable, we should
        # at least be able to select a very large file descriptor which is very
        # unlikely to be opened automatically in the subprocess.  (Apply a
        # fudge factor to avoid hard-coding something too near a limit
        # condition like the maximum possible file descriptor, which a library
        # might at least hypothetically select.)

        fudgeFactor = 17
        unlikelyFD = resource.getrlimit(resource.RLIMIT_NOFILE)[0] - fudgeFactor

        os.dup2(w, unlikelyFD)
        self.addCleanup(os.close, unlikelyFD)

        output = io.BytesIO()

        class GatheringProtocol(ProcessProtocol):
            outReceived = output.write

            def processEnded(self, reason):
                reactor.stop()

        reactor = self.buildReactor()

        reactor.callWhenRunning(
            reactor.spawnProcess,
            GatheringProtocol(),
            pyExe,
            [pyExe, b"-Wignore", b"-c", source],
            usePTY=self.usePTY,
        )

        self.runReactor(reactor)
        reportedChildFDs = set(eval(output.getvalue()))

        stdFDs = [0, 1, 2]

        # Unfortunately this assertion is still not *entirely* deterministic,
        # since hypothetically, any library could open any file descriptor at
        # any time.  See comment above.
        self.assertEqual(
            reportedChildFDs.intersection(set(stdFDs + [unlikelyFD])), set(stdFDs)
        )
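
    # Illustrative sketch (an assumption, not necessarily how
    # process._listOpenFDs is implemented): one way a POSIX process can
    # enumerate its own open file descriptors, preferring /proc/self/fd on
    # Linux and falling back to probing a bounded range of descriptors.
    def _exampleListOpenFDs(self):
        try:
            return sorted(int(name) for name in os.listdir("/proc/self/fd"))
        except OSError:
            limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
            openFDs = []
            for fd in range(min(limit, 4096)):
                try:
                    os.fstat(fd)
                except OSError:
                    continue
                openFDs.append(fd)
            return openFDs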

    @onlyOnPOSIX
    def test_errorDuringExec(self):
        """
        When L{os.execvpe} raises an exception, it will format that exception
        on stderr as UTF-8, regardless of system encoding information.
        """

        def execvpe(*args, **kw):
            # Ensure that real traceback formatting has some non-ASCII in it,
            # by forcing the filename of the last frame to contain non-ASCII.
            filename = "<\N{SNOWMAN}>"
            if not isinstance(filename, str):
                filename = filename.encode("utf-8")
            codeobj = compile("1/0", filename, "single")
            eval(codeobj)

        self.patch(os, "execvpe", execvpe)
        self.patch(sys, "getfilesystemencoding", lambda: "ascii")

        reactor = self.buildReactor()
        output = io.BytesIO()

        @reactor.callWhenRunning
        def whenRunning():
            class TracebackCatcher(ProcessProtocol):
                errReceived = output.write

                def processEnded(self, reason):
                    reactor.stop()

            reactor.spawnProcess(TracebackCatcher(), pyExe, [pyExe, b"-c", b""])

        self.runReactor(reactor, timeout=30)
        self.assertIn("\N{SNOWMAN}".encode(), output.getvalue())

    def test_timelyProcessExited(self):
        """
        If a spawned process exits, C{processExited} will be called in a
        timely manner.
        """
        reactor = self.buildReactor()

        class ExitingProtocol(ProcessProtocol):
            exited = False

            def processExited(protoSelf, reason):
                protoSelf.exited = True
                reactor.stop()
                self.assertEqual(reason.value.exitCode, 0)

        protocol = ExitingProtocol()
        reactor.callWhenRunning(
            reactor.spawnProcess,
            protocol,
            pyExe,
            [pyExe, b"-c", b"raise SystemExit(0)"],
            usePTY=self.usePTY,
        )

        # This will timeout if processExited isn't called:
        self.runReactor(reactor, timeout=30)
        self.assertTrue(protocol.exited)

    def _changeIDTest(self, which):
        """
        Launch a child process, using either the C{uid} or C{gid} argument to
        L{IReactorProcess.spawnProcess} to change either its UID or GID to a
        different value.  If the child process reports this hasn't happened,
        raise an exception to fail the test.

        @param which: Either C{"uid"} or C{"gid"}.
        """
        program = ["import os", f"raise SystemExit(os.get{which}() != 1)"]

        container = []

        class CaptureExitStatus(ProcessProtocol):
            def processEnded(self, reason):
                container.append(reason)
                reactor.stop()

        reactor = self.buildReactor()
        protocol = CaptureExitStatus()
        reactor.callWhenRunning(
            reactor.spawnProcess,
            protocol,
            pyExe,
            [pyExe, "-c", "\n".join(program)],
            **{which: 1},
        )

        self.runReactor(reactor)

        self.assertEqual(0, container[0].value.exitCode)

    @skipIf(_uidgidSkip, _uidgidSkipReason)
    def test_changeUID(self):
        """
        If a value is passed for L{IReactorProcess.spawnProcess}'s C{uid}, the
        child process is run with that UID.
        """
        self._changeIDTest("uid")

    @skipIf(_uidgidSkip, _uidgidSkipReason)
    def test_changeGID(self):
        """
        If a value is passed for L{IReactorProcess.spawnProcess}'s C{gid}, the
        child process is run with that GID.
        """
        self._changeIDTest("gid")

    def test_processExitedRaises(self):
        """
        If L{IProcessProtocol.processExited} raises an exception, it is logged.
        """
        # Ideally we wouldn't need to poke the process module; see
        # https://twistedmatrix.com/trac/ticket/6889
        reactor = self.buildReactor()

        class TestException(Exception):
            pass

        class Protocol(ProcessProtocol):
            def processExited(self, reason):
                reactor.stop()
                raise TestException("processedExited raised")

        protocol = Protocol()
        transport = reactor.spawnProcess(
            protocol, pyExe, [pyExe, b"-c", b""], usePTY=self.usePTY
        )
        self.runReactor(reactor)

        # Manually clean-up broken process handler.
        # Only required if the test fails on systems that support
        # the process module.
        if process is not None:
            for pid, handler in list(process.reapProcessHandlers.items()):
                if handler is not transport:
                    continue
                process.unregisterReapProcessHandler(pid, handler)
                self.fail(
                    "After processExited raised, transport was left in"
                    " reapProcessHandlers"
                )

        self.assertEqual(1, len(self.flushLoggedErrors(TestException)))