Example #1
    def test_processExitedRaises(self):
        """
        If L{IProcessProtocol.processExited} raises an exception, it is logged.
        """
        # Ideally we wouldn't need to poke the process module; see
        # https://twistedmatrix.com/trac/ticket/6889
        reactor = self.buildReactor()

        class TestException(Exception):
            pass

        class Protocol(ProcessProtocol):
            def processExited(self, reason):
                reactor.stop()
                raise TestException("processExited raised")

        protocol = Protocol()
        transport = reactor.spawnProcess(protocol,
                                         pyExe, [pyExe, b"-c", b""],
                                         usePTY=self.usePTY)
        self.runReactor(reactor)

        # Manually clean up the broken process handler.
        # Only required if the test fails on systems that support
        # the process module.
        if process is not None:
            for pid, handler in items(process.reapProcessHandlers):
                if handler is not transport:
                    continue
                process.unregisterReapProcessHandler(pid, handler)
                self.fail("After processExited raised, transport was left in"
                          " reapProcessHandlers")

        self.assertEqual(1, len(self.flushLoggedErrors(TestException)))
Example #2
    def test_processExitedRaises(self):
        """
        If L{IProcessProtocol.processExited} raises an exception, it is logged.
        """
        # Ideally we wouldn't need to poke the process module; see
        # https://twistedmatrix.com/trac/ticket/6889
        reactor = self.buildReactor()

        class TestException(Exception):
            pass

        class Protocol(ProcessProtocol):
            def processExited(self, reason):
                reactor.stop()
                raise TestException("processExited raised")

        protocol = Protocol()
        transport = reactor.spawnProcess(
               protocol, pyExe, [pyExe, b"-c", b""],
               usePTY=self.usePTY)
        self.runReactor(reactor)

        # Manually clean up the broken process handler.
        # Only required if the test fails on systems that support
        # the process module.
        if process is not None:
            for pid, handler in items(process.reapProcessHandlers):
                if handler is not transport:
                    continue
                process.unregisterReapProcessHandler(pid, handler)
                self.fail("After processExited raised, transport was left in"
                          " reapProcessHandlers")

        self.assertEqual(1, len(self.flushLoggedErrors(TestException)))
Example #3
    def _groupResults(self, results, formatter):
        """
        Group tests together based on their results.

        @param results: An iterable of tuples of two or more elements.  The
            first element of each tuple is a test case.  The remaining
            elements describe the outcome of that test case.

        @param formatter: A callable which turns a test case result into a
            string.  The elements after the first of the tuples in
            C{results} will be passed as positional arguments to
            C{formatter}.

        @return: A C{list} of two-tuples.  The first element of each tuple
            is a unique string describing one result from at least one of
            the test cases in C{results}.  The second element is a list of
            the test cases which had that result.
        """
        groups = OrderedDict()
        for content in results:
            case = content[0]
            outcome = content[1:]
            key = formatter(*outcome)
            groups.setdefault(key, []).append(case)
        return items(groups)
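The grouping described in the docstring can be exercised on its own. Below is a minimal standalone sketch, with made-up test names and outcomes, showing how the formatter key collapses cases that share a result; it is an illustration, not part of the reporter.

from collections import OrderedDict

def groupResults(results, formatter):
    # Same idea as _groupResults: bucket each case under the string the
    # formatter produces for its outcome, preserving first-seen order.
    groups = OrderedDict()
    for content in results:
        case, outcome = content[0], content[1:]
        groups.setdefault(formatter(*outcome), []).append(case)
    return list(groups.items())

def formatter(status, reason):
    return "%s: %s" % (status, reason)

# Hypothetical (case, status, reason) tuples.
results = [
    ("test_a", "TODO", "needs a database"),
    ("test_b", "TODO", "needs a database"),
    ("test_c", "TODO", "flaky on CI"),
]
for key, cases in groupResults(results, formatter):
    print(key, "->", cases)
# TODO: needs a database -> ['test_a', 'test_b']
# TODO: flaky on CI -> ['test_c']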
Example #4
class Reporter(TestResult):
    """
    A basic L{TestResult} with support for writing to a stream.

    @ivar _startTime: The time when the first test was started. It defaults to
        L{None}, which means that no test was actually launched.
    @type _startTime: C{float} or L{None}

    @ivar _warningCache: A C{set} of tuples of warning message (file, line,
        text, category) which have already been written to the output stream
        during the currently executing test.  This is used to avoid writing
        duplicates of the same warning to the output stream.
    @type _warningCache: C{set}

    @ivar _publisher: The log publisher which will be observed for warning
        events.
    @type _publisher: L{twisted.python.log.LogPublisher}
    """

    _separator = '-' * 79
    _doubleSeparator = '=' * 79

    def __init__(self,
                 stream=sys.stdout,
                 tbformat='default',
                 realtime=False,
                 publisher=None):
        super(Reporter, self).__init__()
        self._stream = SafeStream(stream)
        self.tbformat = tbformat
        self.realtime = realtime
        self._startTime = None
        self._warningCache = set()

        # Start observing log events so as to be able to report warnings.
        self._publisher = publisher
        if publisher is not None:
            publisher.addObserver(self._observeWarnings)

    def _observeWarnings(self, event):
        """
        Observe warning events and write them to C{self._stream}.

        This method is a log observer which will be registered with
        C{self._publisher.addObserver}.

        @param event: A C{dict} from the logging system.  If it has a
            C{'warning'} key, a logged warning will be extracted from it and
            possibly written to C{self._stream}.
        """
        if 'warning' in event:
            key = (event['filename'], event['lineno'],
                   event['category'].split('.')[-1], str(event['warning']))
            if key not in self._warningCache:
                self._warningCache.add(key)
                self._stream.write('%s:%s: %s: %s\n' % key)

    def startTest(self, test):
        """
        Called when a test begins to run. Records the time when it was first
        called and resets the warning cache.

        @param test: L{ITestCase}
        """
        super(Reporter, self).startTest(test)
        if self._startTime is None:
            self._startTime = self._getTime()
        self._warningCache = set()

    def addFailure(self, test, fail):
        """
        Called when a test fails. If C{realtime} is set, then it prints the
        error to the stream.

        @param test: L{ITestCase} that failed.
        @param fail: L{failure.Failure} containing the error.
        """
        super(Reporter, self).addFailure(test, fail)
        if self.realtime:
            fail = self.failures[-1][1]  # guarantee it's a Failure
            self._write(self._formatFailureTraceback(fail))

    def addError(self, test, error):
        """
        Called when a test raises an error. If C{realtime} is set, then it
        prints the error to the stream.

        @param test: L{ITestCase} that raised the error.
        @param error: L{failure.Failure} containing the error.
        """
        error = self._getFailure(error)
        super(Reporter, self).addError(test, error)
        if self.realtime:
            error = self.errors[-1][1]  # guarantee it's a Failure
            self._write(self._formatFailureTraceback(error))

    def _write(self, format, *args):
        """
        Safely write to the reporter's stream.

        @param format: A format string to write.
        @param *args: The arguments for the format string.
        """
        s = str(format)
        assert isinstance(s, type(''))
        if args:
            self._stream.write(s % args)
        else:
            self._stream.write(s)
        untilConcludes(self._stream.flush)

    def _writeln(self, format, *args):
        """
        Safely write a line to the reporter's stream. Newline is appended to
        the format string.

        @param format: A format string to write.
        @param *args: The arguments for the format string.
        """
        self._write(format, *args)
        self._write('\n')

    def upDownError(self, method, error, warn, printStatus):
        super(Reporter, self).upDownError(method, error, warn, printStatus)
        if warn:
            tbStr = self._formatFailureTraceback(error)
            log.msg(tbStr)
            msg = ("caught exception in %s, your TestCase is broken\n\n%s" %
                   (method, tbStr))
            warnings.warn(msg, BrokenTestCaseWarning, stacklevel=2)

    def cleanupErrors(self, errs):
        super(Reporter, self).cleanupErrors(errs)
        warnings.warn(
            "%s\n%s" % ("REACTOR UNCLEAN! traceback(s) follow: ",
                        self._formatFailureTraceback(errs)),
            BrokenTestCaseWarning)

    def _trimFrames(self, frames):
        """
        Trim frames to remove internal paths.

        When a C{SynchronousTestCase} method fails synchronously, the stack
        looks like this:
         - [0]: C{SynchronousTestCase._run}
         - [1]: C{util.runWithWarningsSuppressed}
         - [2:-2]: code in the test method which failed
         - [-1]: C{_synctest.fail}

        When a C{TestCase} method fails synchronously, the stack looks like
        this:
         - [0]: C{defer.maybeDeferred}
         - [1]: C{utils.runWithWarningsSuppressed}
         - [2]: C{utils.runWithWarningsSuppressed}
         - [3:-2]: code in the test method which failed
         - [-1]: C{_synctest.fail}

        When a method fails inside a C{Deferred} (i.e., when the test method
        returns a C{Deferred}, and that C{Deferred}'s errback fires), the stack
        captured inside the resulting C{Failure} looks like this:
         - [0]: C{defer.Deferred._runCallbacks}
         - [1:-2]: code in the test method which failed
         - [-1]: C{_synctest.fail}

        As a result, we want to trim either [maybeDeferred, runWWS, runWWS] or
        [Deferred._runCallbacks] or [SynchronousTestCase._run, runWWS] from the
        front, and trim the [unittest.fail] from the end.

        There is also another case, in which the test method is badly defined
        and takes extra arguments.

        If the frames do not match any of these cases, they are returned
        unchanged.

        @param frames: The C{list} of frames from the test failure.

        @return: The C{list} of frames to display.
        """
        newFrames = list(frames)

        if len(frames) < 2:
            return newFrames

        firstMethod = newFrames[0][0]
        firstFile = os.path.splitext(os.path.basename(newFrames[0][1]))[0]

        secondMethod = newFrames[1][0]
        secondFile = os.path.splitext(os.path.basename(newFrames[1][1]))[0]

        syncCase = (("_run", "_synctest"), ("runWithWarningsSuppressed",
                                            "util"))
        asyncCase = (("maybeDeferred", "defer"), ("runWithWarningsSuppressed",
                                                  "utils"))

        twoFrames = ((firstMethod, firstFile), (secondMethod, secondFile))

        if _PY3:
            # On Python 3 there is an extra frame which re-raises the
            # exception via compat.reraise; drop it.  Iterate over a copy so
            # that removing an element does not skip the next one.
            for frame in newFrames[:]:
                frameFile = os.path.splitext(os.path.basename(frame[1]))[0]
                if frameFile == "compat" and frame[0] == "reraise":
                    newFrames.remove(frame)

        if twoFrames == syncCase:
            newFrames = newFrames[2:]
        elif twoFrames == asyncCase:
            newFrames = newFrames[3:]
        elif (firstMethod, firstFile) == ("_runCallbacks", "defer"):
            newFrames = newFrames[1:]

        if not newFrames:
            # The method fails before getting called, probably an argument
            # problem
            return newFrames

        last = newFrames[-1]
        if (last[0].startswith('fail') and os.path.splitext(
                os.path.basename(last[1]))[0] == '_synctest'):
            newFrames = newFrames[:-1]

        return newFrames

    def _formatFailureTraceback(self, fail):
        if isinstance(fail, str):
            return fail.rstrip() + '\n'
        fail.frames, frames = self._trimFrames(fail.frames), fail.frames
        result = fail.getTraceback(detail=self.tbformat,
                                   elideFrameworkCode=True)
        fail.frames = frames
        return result

    def _groupResults(self, results, formatter):
        """
        Group tests together based on their results.

        @param results: An iterable of tuples of two or more elements.  The
            first element of each tuple is a test case.  The remaining
            elements describe the outcome of that test case.

        @param formatter: A callable which turns a test case result into a
            string.  The elements after the first of the tuples in
            C{results} will be passed as positional arguments to
            C{formatter}.

        @return: A C{list} of two-tuples.  The first element of each tuple
            is a unique string describing one result from at least one of
            the test cases in C{results}.  The second element is a list of
            the test cases which had that result.
        """
        groups = OrderedDict()
        for content in results:
            case = content[0]
            outcome = content[1:]
            key = formatter(*outcome)
            groups.setdefault(key, []).append(case)
        return items(groups)
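The warning handling above is easier to see in isolation. Here is a rough standalone sketch of the de-duplication idea, using a plain set and print instead of the log publisher and stream; the event dict below is made up for illustration.

seen = set()

def observeWarning(event, write):
    # Mirror of _observeWarnings: one line per distinct
    # (filename, lineno, category, text) warning, duplicates suppressed.
    if 'warning' in event:
        key = (event['filename'], event['lineno'],
               event['category'].split('.')[-1], str(event['warning']))
        if key not in seen:
            seen.add(key)
            write('%s:%s: %s: %s\n' % key)

event = {'warning': DeprecationWarning("old API"),
         'filename': 'example.py', 'lineno': 12,
         'category': 'builtins.DeprecationWarning'}
observeWarning(event, write=lambda text: print(text, end=''))
observeWarning(event, write=lambda text: print(text, end=''))  # suppressed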
Example #5
    def startElementNS(self, namespaceAndName, qname, attrs):
        """
        Called when the parser encounters the start of a new element.

        @param namespaceAndName: a (namespace, name) tuple, where name
            determines which type of action to take, if the namespace matches
            L{TEMPLATE_NAMESPACE}.
        @param qname: ignored.
        @param attrs: attributes on the element being started.
        """

        filename = self.sourceFilename
        lineNumber = self.locator.getLineNumber()
        columnNumber = self.locator.getColumnNumber()

        ns, name = namespaceAndName
        if ns == TEMPLATE_NAMESPACE:
            if name == 'transparent':
                name = ''
            elif name == 'slot':
                try:
                    # Try to get the default value for the slot
                    default = attrs[(None, 'default')]
                except KeyError:
                    # If there wasn't one, then use None to indicate no
                    # default.
                    default = None
                el = slot(
                    attrs[(None, 'name')], default=default,
                    filename=filename, lineNumber=lineNumber,
                    columnNumber=columnNumber)
                self.stack.append(el)
                self.current.append(el)
                self.current = el.children
                return

        render = None

        attrs = OrderedDict(attrs)
        for k, v in items(attrs):
            attrNS, justTheName = k
            if attrNS != TEMPLATE_NAMESPACE:
                continue
            if justTheName == 'render':
                render = v
                del attrs[k]

        # nonTemplateAttrs is a dictionary mapping attributes that are *not* in
        # TEMPLATE_NAMESPACE to their values.  Those in TEMPLATE_NAMESPACE were
        # just removed from 'attrs' in the loop immediately above.  The key in
        # nonTemplateAttrs is either simply the attribute name (if it was not
        # specified as having a namespace in the template) or prefix:name,
        # preserving the xml namespace prefix given in the document.

        nonTemplateAttrs = OrderedDict()
        for (attrNs, attrName), v in items(attrs):
            nsPrefix = self.prefixMap.get(attrNs)
            if nsPrefix is None:
                attrKey = attrName
            else:
                attrKey = '%s:%s' % (nsPrefix, attrName)
            nonTemplateAttrs[attrKey] = v

        if ns == TEMPLATE_NAMESPACE and name == 'attr':
            if not self.stack:
                # TODO: define a better exception for this?
                raise AssertionError(
                    '<{%s}attr> as top-level element' % (TEMPLATE_NAMESPACE,))
            if 'name' not in nonTemplateAttrs:
                # TODO: same here
                raise AssertionError(
                    '<{%s}attr> requires a name attribute' % (TEMPLATE_NAMESPACE,))
            el = Tag('', render=render, filename=filename,
                     lineNumber=lineNumber, columnNumber=columnNumber)
            self.stack[-1].attributes[nonTemplateAttrs['name']] = el
            self.stack.append(el)
            self.current = el.children
            return

        # Apply any xmlns attributes
        if self.xmlnsAttrs:
            nonTemplateAttrs.update(OrderedDict(self.xmlnsAttrs))
            self.xmlnsAttrs = []

        # Add the prefix that was used in the parsed template for non-template
        # namespaces (which will not be consumed anyway).
        if ns != TEMPLATE_NAMESPACE and ns is not None:
            prefix = self.prefixMap[ns]
            if prefix is not None:
                name = '%s:%s' % (prefix, name)
        el = Tag(
            name, attributes=OrderedDict(nonTemplateAttrs), render=render,
            filename=filename, lineNumber=lineNumber,
            columnNumber=columnNumber)
        self.stack.append(el)
        self.current.append(el)
        self.current = el.children
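For context, here is a sketch of the kind of markup this handler consumes, showing t:render, t:slot (with a default), t:attr, and t:transparent under the template namespace. The URI matches TEMPLATE_NAMESPACE in twisted.web.template, but treat the snippet as illustrative rather than a tested template.

TEMPLATE = """
<div xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"
     t:render="header">
  <a>
    <t:attr name="href"><t:slot name="link" default="#" /></t:attr>
    <t:slot name="title" />
  </a>
  <t:transparent>rendered without a wrapping tag</t:transparent>
</div>
"""
# t:render="header" ends up in the 'render' variable above, each t:slot
# becomes a slot() instance carrying its optional default, and the t:attr
# element is attached to its parent Tag's attributes under the given name.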
Example #6
    def _setupChild(self, fdmap):
        """
        fdmap[childFD] = parentFD

        The child wants to end up with 'childFD' attached to what used to be
        the parent's parentFD. As an example, a bash command run like
        'command 2>&1' would correspond to an fdmap of {0:0, 1:1, 2:1}.
        'command >foo.txt' would be {0:0, 1:os.open('foo.txt'), 2:2}.

        This is accomplished in two steps::

            1. close all file descriptors that aren't values of fdmap.  This
               means 0 .. maxfds (or just the open fds within that range, if
               the platform supports '/proc/<pid>/fd').

            2. for each childFD::

                 - if fdmap[childFD] == childFD, the descriptor is already in
                   place.  Make sure the CLOEXEC flag is not set, then delete
                   the entry from fdmap.

                 - if childFD is in fdmap.values(), then the target descriptor
                   is busy. Use os.dup() to move it elsewhere, update all
                   fdmap[childFD] items that point to it, then close the
                   original. Then fall through to the next case.

                 - now fdmap[childFD] is not in fdmap.values(), and is free.
                   Use os.dup2() to move it to the right place, then close the
                   original.
        """
        debug = self.debug_child
        if debug:
            errfd = sys.stderr
            errfd.write("starting _setupChild\n")

        destList = fdmap.values()
        for fd in _listOpenFDs():
            if fd in destList:
                continue
            if debug and fd == errfd.fileno():
                continue
            try:
                os.close(fd)
            except:
                pass

        # at this point, the only fds still open are the ones that need to
        # be moved to their appropriate positions in the child (the targets
        # of fdmap, i.e. fdmap.values() )

        if debug: print("fdmap", fdmap, file=errfd)
        for child in sorted(fdmap.keys()):
            target = fdmap[child]
            if target == child:
                # fd is already in place
                if debug: print("%d already in place" % target, file=errfd)
                fdesc._unsetCloseOnExec(child)
            else:
                if child in fdmap.values():
                    # we can't replace child-fd yet, as some other mapping
                    # still needs the fd it wants to target. We must preserve
                    # that old fd by duping it to a new home.
                    newtarget = os.dup(child)  # give it a safe home
                    if debug:
                        print("os.dup(%d) -> %d" % (child, newtarget),
                              file=errfd)
                    os.close(child)  # close the original
                    for c, p in items(fdmap):
                        if p == child:
                            fdmap[c] = newtarget  # update all pointers
                # now it should be available
                if debug: print("os.dup2(%d,%d)" % (target, child), file=errfd)
                os.dup2(target, child)

        # At this point, the child has everything it needs. We want to close
        # everything that isn't going to be used by the child, i.e.
        # everything not in fdmap.keys(). The only remaining fds open are
        # those in fdmap.values().

        # Any given fd may appear in fdmap.values() multiple times, so we
        # need to remove duplicates first.

        old = []
        for fd in fdmap.values():
            if not fd in old:
                if not fd in fdmap.keys():
                    old.append(fd)
        if debug: print("old", old, file=errfd)
        for fd in old:
            os.close(fd)

        self._resetSignalDisposition()
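The two-step shuffle described in the docstring can be simulated without touching real file descriptors. The sketch below only records the operations the method would perform, which makes the "target descriptor is busy" dance visible; it deliberately ignores the CLOEXEC and close-the-original details, and the spare fd number is arbitrary.

def planShuffle(fdmap, spare=10):
    # Walk childFDs in order, exactly like _setupChild, but emit a plan
    # instead of calling os.dup/os.dup2.
    fdmap = dict(fdmap)
    plan = []
    for child in sorted(fdmap):
        target = fdmap[child]
        if target == child:
            plan.append("keep fd %d in place" % child)
            continue
        if child in fdmap.values():
            # Some later mapping still needs fd 'child'; park it elsewhere.
            plan.append("dup fd %d to spare fd %d" % (child, spare))
            for c, p in list(fdmap.items()):
                if p == child:
                    fdmap[c] = spare
            spare += 1
        plan.append("dup2 fd %d onto fd %d" % (fdmap[child], child))
    return plan

# 'command 2>&1' from the docstring: stdin/stdout stay, stderr copies stdout.
for step in planShuffle({0: 0, 1: 1, 2: 1}):
    print(step)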
Example #7
    def __init__(self,
                 reactor,
                 executable,
                 args,
                 environment,
                 path,
                 proto,
                 uid=None,
                 gid=None,
                 childFDs=None):
        """
        Spawn an operating-system process.

        This is where the hard work of disconnecting all currently open
        files / forking / executing the new process happens.  (This is
        executed automatically when a Process is instantiated.)

        This will also run the subprocess as a given user ID and group ID, if
        specified.  (Implementation Note: this doesn't support all the arcane
        nuances of setXXuid on UNIX: it will assume that either your effective
        or real UID is 0.)
        """
        if not proto:
            assert 'r' not in childFDs.values()
            assert 'w' not in childFDs.values()
        _BaseProcess.__init__(self, proto)

        self.pipes = {}
        # keys are childFDs, we can sense them closing
        # values are ProcessReader/ProcessWriters

        helpers = {}
        # keys are childFDs
        # values are parentFDs

        if childFDs is None:
            childFDs = {
                0: "w",  # we write to the child's stdin
                1: "r",  # we read from their stdout
                2: "r",  # and we read from their stderr
            }

        debug = self.debug
        if debug: print("childFDs", childFDs)

        _openedPipes = []

        def pipe():
            r, w = os.pipe()
            _openedPipes.extend([r, w])
            return r, w

        # fdmap.keys() are filenos of pipes that are used by the child.
        fdmap = {}  # maps childFD to parentFD
        try:
            for childFD, target in items(childFDs):
                if debug: print("[%d]" % childFD, target)
                if target == "r":
                    # we need a pipe that the parent can read from
                    readFD, writeFD = pipe()
                    if debug:
                        print("readFD=%d, writeFD=%d" % (readFD, writeFD))
                    fdmap[childFD] = writeFD  # child writes to this
                    helpers[childFD] = readFD  # parent reads from this
                elif target == "w":
                    # we need a pipe that the parent can write to
                    readFD, writeFD = pipe()
                    if debug:
                        print("readFD=%d, writeFD=%d" % (readFD, writeFD))
                    fdmap[childFD] = readFD  # child reads from this
                    helpers[childFD] = writeFD  # parent writes to this
                else:
                    assert type(
                        target) == int, '%r should be an int' % (target, )
                    fdmap[childFD] = target  # parent ignores this
            if debug: print("fdmap", fdmap)
            if debug: print("helpers", helpers)
            # the child only cares about fdmap.values()

            self._fork(path,
                       uid,
                       gid,
                       executable,
                       args,
                       environment,
                       fdmap=fdmap)
        except:
            for pipe in _openedPipes:
                os.close(pipe)
            raise

        # we are the parent process:
        self.proto = proto

        # arrange for the parent-side pipes to be read and written
        for childFD, parentFD in items(helpers):
            os.close(fdmap[childFD])
            if childFDs[childFD] == "r":
                reader = self.processReaderFactory(reactor, self, childFD,
                                                   parentFD)
                self.pipes[childFD] = reader

            if childFDs[childFD] == "w":
                writer = self.processWriterFactory(reactor,
                                                   self,
                                                   childFD,
                                                   parentFD,
                                                   forceReadHack=True)
                self.pipes[childFD] = writer

        try:
            # the 'transport' is used for some compatibility methods
            if self.proto is not None:
                self.proto.makeConnection(self)
        except:
            log.err()

        # The reactor might not be running yet.  This might call back into
        # processEnded synchronously, triggering an application-visible
        # callback.  That's probably not ideal.  The replacement API for
        # spawnProcess should improve upon this situation.
        registerReapProcessHandler(self.pid, self)
Example #8
    def __init__(self, reactor, protocol, command, args, environment, path):
        """
        Create a new child process.
        """
        _pollingfile._PollingTimer.__init__(self, reactor)
        BaseProcess.__init__(self, protocol)

        # security attributes for pipes
        sAttrs = win32security.SECURITY_ATTRIBUTES()
        sAttrs.bInheritHandle = 1

        # create the pipes which will connect to the secondary process
        self.hStdoutR, hStdoutW = win32pipe.CreatePipe(sAttrs, 0)
        self.hStderrR, hStderrW = win32pipe.CreatePipe(sAttrs, 0)
        hStdinR, self.hStdinW = win32pipe.CreatePipe(sAttrs, 0)

        win32pipe.SetNamedPipeHandleState(self.hStdinW, win32pipe.PIPE_NOWAIT,
                                          None, None)

        # set the info structure for the new process.
        StartupInfo = win32process.STARTUPINFO()
        StartupInfo.hStdOutput = hStdoutW
        StartupInfo.hStdError = hStderrW
        StartupInfo.hStdInput = hStdinR
        StartupInfo.dwFlags = win32process.STARTF_USESTDHANDLES

        # Create new handles whose inheritance property is false
        currentPid = win32api.GetCurrentProcess()

        tmp = win32api.DuplicateHandle(currentPid, self.hStdoutR, currentPid,
                                       0, 0, win32con.DUPLICATE_SAME_ACCESS)
        win32file.CloseHandle(self.hStdoutR)
        self.hStdoutR = tmp

        tmp = win32api.DuplicateHandle(currentPid, self.hStderrR, currentPid,
                                       0, 0, win32con.DUPLICATE_SAME_ACCESS)
        win32file.CloseHandle(self.hStderrR)
        self.hStderrR = tmp

        tmp = win32api.DuplicateHandle(currentPid, self.hStdinW, currentPid, 0,
                                       0, win32con.DUPLICATE_SAME_ACCESS)
        win32file.CloseHandle(self.hStdinW)
        self.hStdinW = tmp

        # Add the specified environment to the current environment - this is
        # necessary because certain operations are only supported on Windows
        # if certain environment variables are present.

        env = os.environ.copy()
        env.update(environment or {})
        newenv = {}
        for key, value in items(env):

            key = os.fsdecode(key)
            value = os.fsdecode(value)

            newenv[key] = value

        env = newenv

        # Make sure all the arguments are Unicode.
        args = [os.fsdecode(x) for x in args]

        cmdline = quoteArguments(args)

        # The command, too, needs to be Unicode, if one was given.
        command = os.fsdecode(command) if command else command
        path = os.fsdecode(path) if path else path

        # TODO: error detection here.  See #2787 and #4184.
        def doCreate():
            flags = win32con.CREATE_NO_WINDOW
            self.hProcess, self.hThread, self.pid, dwTid = win32process.CreateProcess(
                command, cmdline, None, None, 1, flags, env, path, StartupInfo)

        try:
            doCreate()
        except pywintypes.error as pwte:
            if not _invalidWin32App(pwte):
                # This behavior isn't _really_ documented, but let's make it
                # consistent with the behavior that is documented.
                raise OSError(pwte)
            else:
                # look for a shebang line.  Insert the original 'command'
                # (actually a script) into the new arguments list.
                sheb = _findShebang(command)
                if sheb is None:
                    raise OSError("%r is neither a Windows executable, "
                                  "nor a script with a shebang line" % command)
                else:
                    args = list(args)
                    args.insert(0, command)
                    cmdline = quoteArguments(args)
                    origcmd = command
                    command = sheb
                    try:
                        # Let's try again.
                        doCreate()
                    except pywintypes.error as pwte2:
                        # d'oh, failed again!
                        if _invalidWin32App(pwte2):
                            raise OSError("%r has an invalid shebang line: "
                                          "%r is not a valid executable" %
                                          (origcmd, sheb))
                        raise OSError(pwte2)

        # close handles which only the child will use
        win32file.CloseHandle(hStderrW)
        win32file.CloseHandle(hStdoutW)
        win32file.CloseHandle(hStdinR)

        # set up everything
        self.stdout = _pollingfile._PollableReadPipe(
            self.hStdoutR, lambda data: self.proto.childDataReceived(1, data),
            self.outConnectionLost)

        self.stderr = _pollingfile._PollableReadPipe(
            self.hStderrR, lambda data: self.proto.childDataReceived(2, data),
            self.errConnectionLost)

        self.stdin = _pollingfile._PollableWritePipe(self.hStdinW,
                                                     self.inConnectionLost)

        for pipewatcher in self.stdout, self.stderr, self.stdin:
            self._addPollableResource(pipewatcher)

        # notify protocol
        self.proto.makeConnection(self)

        self._addPollableResource(_Reaper(self))
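The environment-merging step above is independent of the Win32 calls and can be sketched on its own; the extra variable here is hypothetical. The idea is simply to layer the requested environment over os.environ and normalise every key and value to text before handing the block to CreateProcess.

import os

requested = {"MY_EXTRA_VAR": "42"}   # hypothetical variables for the child

env = os.environ.copy()
env.update(requested)
# Decode keys and values so the resulting environment block is all text.
env = {os.fsdecode(key): os.fsdecode(value) for key, value in env.items()}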
Example #9
    def __init__(self, reactor, protocol, command, args, environment, path):
        """
        Create a new child process.
        """
        _pollingfile._PollingTimer.__init__(self, reactor)
        BaseProcess.__init__(self, protocol)

        # security attributes for pipes
        sAttrs = win32security.SECURITY_ATTRIBUTES()
        sAttrs.bInheritHandle = 1

        # create the pipes which will connect to the secondary process
        self.hStdoutR, hStdoutW = win32pipe.CreatePipe(sAttrs, 0)
        self.hStderrR, hStderrW = win32pipe.CreatePipe(sAttrs, 0)
        hStdinR, self.hStdinW  = win32pipe.CreatePipe(sAttrs, 0)

        win32pipe.SetNamedPipeHandleState(self.hStdinW,
                                          win32pipe.PIPE_NOWAIT,
                                          None,
                                          None)

        # set the info structure for the new process.
        StartupInfo = win32process.STARTUPINFO()
        StartupInfo.hStdOutput = hStdoutW
        StartupInfo.hStdError  = hStderrW
        StartupInfo.hStdInput  = hStdinR
        StartupInfo.dwFlags = win32process.STARTF_USESTDHANDLES

        # Create new handles whose inheritance property is false
        currentPid = win32api.GetCurrentProcess()

        tmp = win32api.DuplicateHandle(currentPid, self.hStdoutR, currentPid, 0, 0,
                                       win32con.DUPLICATE_SAME_ACCESS)
        win32file.CloseHandle(self.hStdoutR)
        self.hStdoutR = tmp

        tmp = win32api.DuplicateHandle(currentPid, self.hStderrR, currentPid, 0, 0,
                                       win32con.DUPLICATE_SAME_ACCESS)
        win32file.CloseHandle(self.hStderrR)
        self.hStderrR = tmp

        tmp = win32api.DuplicateHandle(currentPid, self.hStdinW, currentPid, 0, 0,
                                       win32con.DUPLICATE_SAME_ACCESS)
        win32file.CloseHandle(self.hStdinW)
        self.hStdinW = tmp

        # Add the specified environment to the current environment - this is
        # necessary because certain operations are only supported on Windows
        # if certain environment variables are present.

        env = os.environ.copy()
        env.update(environment or {})
        newenv = {}
        for key, value in items(env):

            key = _fsdecode(key)
            value = _fsdecode(value)

            newenv[key] = value

        env = newenv

        # Make sure all the arguments are Unicode.
        args = [_fsdecode(x) for x in args]

        cmdline = quoteArguments(args)

        # The command, too, needs to be Unicode, if one was given.
        command = _fsdecode(command) if command else command
        path = _fsdecode(path) if path else path

        # TODO: error detection here.  See #2787 and #4184.
        def doCreate():
            flags = win32con.CREATE_NO_WINDOW
            self.hProcess, self.hThread, self.pid, dwTid = win32process.CreateProcess(
                command, cmdline, None, None, 1, flags, env, path, StartupInfo)
        try:
            doCreate()
        except pywintypes.error as pwte:
            if not _invalidWin32App(pwte):
                # This behavior isn't _really_ documented, but let's make it
                # consistent with the behavior that is documented.
                raise OSError(pwte)
            else:
                # look for a shebang line.  Insert the original 'command'
                # (actually a script) into the new arguments list.
                sheb = _findShebang(command)
                if sheb is None:
                    raise OSError(
                        "%r is neither a Windows executable, "
                        "nor a script with a shebang line" % command)
                else:
                    args = list(args)
                    args.insert(0, command)
                    cmdline = quoteArguments(args)
                    origcmd = command
                    command = sheb
                    try:
                        # Let's try again.
                        doCreate()
                    except pywintypes.error as pwte2:
                        # d'oh, failed again!
                        if _invalidWin32App(pwte2):
                            raise OSError(
                                "%r has an invalid shebang line: "
                                "%r is not a valid executable" % (
                                    origcmd, sheb))
                        raise OSError(pwte2)

        # close handles which only the child will use
        win32file.CloseHandle(hStderrW)
        win32file.CloseHandle(hStdoutW)
        win32file.CloseHandle(hStdinR)

        # set up everything
        self.stdout = _pollingfile._PollableReadPipe(
            self.hStdoutR,
            lambda data: self.proto.childDataReceived(1, data),
            self.outConnectionLost)

        self.stderr = _pollingfile._PollableReadPipe(
                self.hStderrR,
                lambda data: self.proto.childDataReceived(2, data),
                self.errConnectionLost)

        self.stdin = _pollingfile._PollableWritePipe(
            self.hStdinW, self.inConnectionLost)

        for pipewatcher in self.stdout, self.stderr, self.stdin:
            self._addPollableResource(pipewatcher)

        # notify protocol
        self.proto.makeConnection(self)

        self._addPollableResource(_Reaper(self))
Example #10
    def _setupChild(self, fdmap):
        """
        fdmap[childFD] = parentFD

        The child wants to end up with 'childFD' attached to what used to be
        the parent's parentFD. As an example, a bash command run like
        'command 2>&1' would correspond to an fdmap of {0:0, 1:1, 2:1}.
        'command >foo.txt' would be {0:0, 1:os.open('foo.txt'), 2:2}.

        This is accomplished in two steps::

            1. close all file descriptors that aren't values of fdmap.  This
               means 0 .. maxfds (or just the open fds within that range, if
               the platform supports '/proc/<pid>/fd').

            2. for each childFD::

                 - if fdmap[childFD] == childFD, the descriptor is already in
                   place.  Make sure the CLOEXEC flag is not set, then delete
                   the entry from fdmap.

                 - if childFD is in fdmap.values(), then the target descriptor
                   is busy. Use os.dup() to move it elsewhere, update all
                   fdmap[childFD] items that point to it, then close the
                   original. Then fall through to the next case.

                 - now fdmap[childFD] is not in fdmap.values(), and is free.
                   Use os.dup2() to move it to the right place, then close the
                   original.
        """
        debug = self.debug_child
        if debug:
            errfd = sys.stderr
            errfd.write("starting _setupChild\n")

        destList = fdmap.values()
        for fd in _listOpenFDs():
            if fd in destList:
                continue
            if debug and fd == errfd.fileno():
                continue
            try:
                os.close(fd)
            except:
                pass

        # at this point, the only fds still open are the ones that need to
        # be moved to their appropriate positions in the child (the targets
        # of fdmap, i.e. fdmap.values() )

        if debug: print("fdmap", fdmap, file=errfd)
        for child in sorted(fdmap.keys()):
            target = fdmap[child]
            if target == child:
                # fd is already in place
                if debug: print("%d already in place" % target, file=errfd)
                fdesc._unsetCloseOnExec(child)
            else:
                if child in fdmap.values():
                    # we can't replace child-fd yet, as some other mapping
                    # still needs the fd it wants to target. We must preserve
                    # that old fd by duping it to a new home.
                    newtarget = os.dup(child) # give it a safe home
                    if debug: print("os.dup(%d) -> %d" % (child, newtarget),
                                    file=errfd)
                    os.close(child) # close the original
                    for c, p in items(fdmap):
                        if p == child:
                            fdmap[c] = newtarget # update all pointers
                # now it should be available
                if debug: print("os.dup2(%d,%d)" % (target, child), file=errfd)
                os.dup2(target, child)

        # At this point, the child has everything it needs. We want to close
        # everything that isn't going to be used by the child, i.e.
        # everything not in fdmap.keys(). The only remaining fds open are
        # those in fdmap.values().

        # Any given fd may appear in fdmap.values() multiple times, so we
        # need to remove duplicates first.

        old = []
        for fd in fdmap.values():
            if not fd in old:
                if not fd in fdmap.keys():
                    old.append(fd)
        if debug: print("old", old, file=errfd)
        for fd in old:
            os.close(fd)

        self._resetSignalDisposition()
Example #11
    def __init__(self,
                 reactor, executable, args, environment, path, proto,
                 uid=None, gid=None, childFDs=None):
        """
        Spawn an operating-system process.

        This is where the hard work of disconnecting all currently open
        files / forking / executing the new process happens.  (This is
        executed automatically when a Process is instantiated.)

        This will also run the subprocess as a given user ID and group ID, if
        specified.  (Implementation Note: this doesn't support all the arcane
        nuances of setXXuid on UNIX: it will assume that either your effective
        or real UID is 0.)
        """
        if not proto:
            assert 'r' not in childFDs.values()
            assert 'w' not in childFDs.values()
        _BaseProcess.__init__(self, proto)

        self.pipes = {}
        # keys are childFDs, we can sense them closing
        # values are ProcessReader/ProcessWriters

        helpers = {}
        # keys are childFDs
        # values are parentFDs

        if childFDs is None:
            childFDs = {0: "w", # we write to the child's stdin
                        1: "r", # we read from their stdout
                        2: "r", # and we read from their stderr
                        }

        debug = self.debug
        if debug: print("childFDs", childFDs)

        _openedPipes = []
        def pipe():
            r, w = os.pipe()
            _openedPipes.extend([r, w])
            return r, w

        # fdmap.keys() are filenos of pipes that are used by the child.
        fdmap = {} # maps childFD to parentFD
        try:
            for childFD, target in items(childFDs):
                if debug: print("[%d]" % childFD, target)
                if target == "r":
                    # we need a pipe that the parent can read from
                    readFD, writeFD = pipe()
                    if debug: print("readFD=%d, writeFD=%d" % (readFD, writeFD))
                    fdmap[childFD] = writeFD     # child writes to this
                    helpers[childFD] = readFD    # parent reads from this
                elif target == "w":
                    # we need a pipe that the parent can write to
                    readFD, writeFD = pipe()
                    if debug: print("readFD=%d, writeFD=%d" % (readFD, writeFD))
                    fdmap[childFD] = readFD      # child reads from this
                    helpers[childFD] = writeFD   # parent writes to this
                else:
                    assert type(target) == int, '%r should be an int' % (target,)
                    fdmap[childFD] = target      # parent ignores this
            if debug: print("fdmap", fdmap)
            if debug: print("helpers", helpers)
            # the child only cares about fdmap.values()

            self._fork(path, uid, gid, executable, args, environment, fdmap=fdmap)
        except:
            for pipe in _openedPipes:
                os.close(pipe)
            raise

        # we are the parent process:
        self.proto = proto

        # arrange for the parent-side pipes to be read and written
        for childFD, parentFD in items(helpers):
            os.close(fdmap[childFD])
            if childFDs[childFD] == "r":
                reader = self.processReaderFactory(reactor, self, childFD,
                                        parentFD)
                self.pipes[childFD] = reader

            if childFDs[childFD] == "w":
                writer = self.processWriterFactory(reactor, self, childFD,
                                        parentFD, forceReadHack=True)
                self.pipes[childFD] = writer

        try:
            # the 'transport' is used for some compatibility methods
            if self.proto is not None:
                self.proto.makeConnection(self)
        except:
            log.err()

        # The reactor might not be running yet.  This might call back into
        # processEnded synchronously, triggering an application-visible
        # callback.  That's probably not ideal.  The replacement API for
        # spawnProcess should improve upon this situation.
        registerReapProcessHandler(self.pid, self)
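A hedged usage sketch for the childFDs mapping documented above, assuming a POSIX reactor (extra child FDs are not supported by the Windows implementation shown earlier). It requests the default stdin/stdout/stderr pipes plus a fourth pipe on the child's fd 3 that the parent reads from; the shell command and protocol name are only examples.

from twisted.internet import protocol, reactor

class Collector(protocol.ProcessProtocol):
    def childDataReceived(self, childFD, data):
        # Data arrives tagged with the child fd it came from (1, 2 or 3 here).
        print("fd %d said %r" % (childFD, data))

    def processEnded(self, reason):
        reactor.stop()

reactor.spawnProcess(
    Collector(), "/bin/sh",
    ["/bin/sh", "-c", "echo out; echo extra >&3"],
    env={}, childFDs={0: "w", 1: "r", 2: "r", 3: "r"})
reactor.run()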