Example #1
0
 def flush_buffer(fd):
     # Log whatever partial ('\n'-unterminated) line remains buffered for
     # this fd, then drop the fd's buffer entry entirely.
     leftover = buffers[fd]
     if not leftover:
         del buffers[fd]
         return
     header, level = loggers[fd]
     with sublevel_added(logger, header):
         logger.log(level, leftover)
     del buffers[fd]
Example #2
0
 def flush_buffer(fd):
     # Emit any remaining buffered text for `fd` to the logger registered
     # for it, then remove the fd's buffer entry. Called when a stream is
     # closed/re-opened, so a final line without a trailing '\n' is not lost.
     buf = buffers[fd]
     if buf:
         # flush buffer in case last line not terminated by '\n'
         header, level = loggers[fd]
         with sublevel_added(logger, header):
             logger.log(level, buf)
     del buffers[fd]
Example #3
0
    def _log_process_with_logpipes(self, proc, stdout_to):
        """Pump stdout/stderr of *proc* plus any attached log FIFOs into
        ``self.logger`` until the child terminates.

        proc: a subprocess with ``stdout`` and ``stderr`` pipe objects.
        stdout_to: file-like object or None; when given, raw stdout data is
            forwarded to it verbatim instead of being logged line-by-line.

        Returns the child's exit code (from ``proc.wait()``).
        """
        # Weave together input from stdout, stderr, and any attached log
        # pipes.  To avoid any deadlocks with unbuffered stderr
        # interlaced with use of log pipe etc. we avoid readline(), but
        # instead use os.open to read and handle line-assembly ourselves...
        logger = self.logger

        stdout_fd, stderr_fd = proc.stdout.fileno(), proc.stderr.fileno()
        poller = select.poll()
        poller.register(stdout_fd)
        poller.register(stderr_fd)

        # Set up { fd : (header, level) }
        loggers = {stdout_fd: (None, INFO), stderr_fd: (None, INFO)}
        buffers = {stdout_fd: '', stderr_fd: ''}

        # The FIFO pipes are a bit tricky as they need to be re-opened whenever
        # any client closes.  Re-opening also modifies the loggers dict and the
        # fd_to_logpipe dict.

        fd_to_logpipe = {}  # stderr/stdout not re-opened

        def open_fifo(fifo_filename, header, level):
            # need to open in non-blocking mode to avoid waiting for printing client process
            fd = os.open(fifo_filename, os.O_NONBLOCK | os.O_RDONLY)
            # remove non-blocking after open to treat all streams uniformly in
            # the reading code
            fcntl.fcntl(fd, fcntl.F_SETFL, os.O_RDONLY)
            loggers[fd] = (header, level)
            buffers[fd] = ''
            fd_to_logpipe[fd] = fifo_filename
            poller.register(fd)

        def flush_buffer(fd):
            # Emit any '\n'-unterminated remainder for fd, then drop its buffer.
            buf = buffers[fd]
            if buf:
                # flush buffer in case last line not terminated by '\n'
                header, level = loggers[fd]
                with sublevel_added(logger, header):
                    logger.log(level, buf)
            del buffers[fd]

        def close_fifo(fd):
            # Tears down all per-fd state; note it mutates fd_to_logpipe.
            flush_buffer(fd)
            poller.unregister(fd)
            os.close(fd)
            del loggers[fd]
            del fd_to_logpipe[fd]

        def reopen_fifo(fd):
            # A writer closed its end; re-open the FIFO (under a new fd) so
            # subsequent writers can attach, preserving header/level.
            fifo_filename = fd_to_logpipe[fd]
            header, level = loggers[fd]
            close_fifo(fd)
            open_fifo(fifo_filename, header, level)

        for (header, level), fifo_filename in self.log_fifo_filenames.items():
            open_fifo(fifo_filename, header, level)

        while True:
            # Python poll() doesn't return when SIGCHLD is received;
            # and there's the freak case where a process first
            # terminates stdout/stderr, then trying to write to a log
            # pipe, so we should track child termination the proper
            # way. Being in Python, it's easiest to just poll every
            # 50 ms; the majority of the time is spent in poll() so
            # it doesn't really increase log message latency
            events = poller.poll(50)
            if len(events) == 0:
                # NOTE(review): termination is only checked on an idle poll;
                # a FIFO that keeps raising POLLHUP could delay the break —
                # confirm this cannot starve after child exit.
                if proc.poll() is not None:
                    break  # child terminated
            for fd, reason in events:
                if reason & select.POLLHUP and not (reason & select.POLLIN):
                    # we want to continue receiving POLLHUP|POLLIN until all
                    # is read
                    if fd in fd_to_logpipe:
                        reopen_fifo(fd)
                    elif fd in (stdout_fd, stderr_fd):
                        poller.unregister(fd)
                elif reason & select.POLLIN:
                    if stdout_to is not None and fd == stdout_fd:
                        # Just forward
                        buf = os.read(fd, LOG_PIPE_BUFSIZE)
                        stdout_to.write(buf)
                    else:
                        # append new bytes to what's already been read on this fd; and
                        # emit any completed lines
                        new_bytes = os.read(fd, LOG_PIPE_BUFSIZE)
                        assert new_bytes != ''  # after all, we did poll
                        buffers[fd] += new_bytes
                        lines = buffers[fd].splitlines(True)  # keepends=True
                        if lines[-1][-1] != '\n':
                            # last line incomplete; keep it buffered
                            buffers[fd] = lines[-1]
                            del lines[-1]
                        else:
                            buffers[fd] = ''
                        # have list of lines, emit them to logger
                        header, level = loggers[fd]
                        for line in lines:
                            line = line.rstrip('\n')
                            with sublevel_added(logger, header):
                                logger.log(level, line)

        flush_buffer(stderr_fd)
        flush_buffer(stdout_fd)
        # close_fifo() deletes from fd_to_logpipe, so iterate over a snapshot
        # of the keys to avoid mutating the dict while iterating it
        # (RuntimeError on Python 3).
        for fd in list(fd_to_logpipe):
            close_fifo(fd)

        retcode = proc.wait()
        return retcode
Example #4
0
    def _log_process_with_logpipes(self, proc, stdout_to):
        """Pump stdout/stderr of *proc* plus any attached log FIFOs into
        ``self.logger`` until the child terminates.

        proc: a subprocess with ``stdout`` and ``stderr`` pipe objects.
        stdout_to: file-like object or None; when given, raw stdout data is
            forwarded to it verbatim instead of being logged line-by-line.

        Returns the child's exit code (from ``proc.wait()``).
        """
        # Weave together input from stdout, stderr, and any attached log
        # pipes.  To avoid any deadlocks with unbuffered stderr
        # interlaced with use of log pipe etc. we avoid readline(), but
        # instead use os.open to read and handle line-assembly ourselves...
        logger = self.logger

        stdout_fd, stderr_fd = proc.stdout.fileno(), proc.stderr.fileno()
        poller = select.poll()
        poller.register(stdout_fd)
        poller.register(stderr_fd)

        # Set up { fd : (header, level) }
        loggers = {stdout_fd: (None, INFO), stderr_fd: (None, INFO)}
        buffers = {stdout_fd: '', stderr_fd: ''}

        # The FIFO pipes are a bit tricky as they need to be re-opened whenever
        # any client closes.  Re-opening also modifies the loggers dict and the
        # fd_to_logpipe dict.

        fd_to_logpipe = {} # stderr/stdout not re-opened

        def open_fifo(fifo_filename, header, level):
            # need to open in non-blocking mode to avoid waiting for printing client process
            fd = os.open(fifo_filename, os.O_NONBLOCK|os.O_RDONLY)
            # remove non-blocking after open to treat all streams uniformly in
            # the reading code
            fcntl.fcntl(fd, fcntl.F_SETFL, os.O_RDONLY)
            loggers[fd] = (header, level)
            buffers[fd] = ''
            fd_to_logpipe[fd] = fifo_filename
            poller.register(fd)

        def flush_buffer(fd):
            # Emit any '\n'-unterminated remainder for fd, then drop its buffer.
            buf = buffers[fd]
            if buf:
                # flush buffer in case last line not terminated by '\n'
                header, level = loggers[fd]
                with sublevel_added(logger, header):
                    logger.log(level, buf)
            del buffers[fd]

        def close_fifo(fd):
            # Tears down all per-fd state; note it mutates fd_to_logpipe.
            flush_buffer(fd)
            poller.unregister(fd)
            os.close(fd)
            del loggers[fd]
            del fd_to_logpipe[fd]

        def reopen_fifo(fd):
            # A writer closed its end; re-open the FIFO (under a new fd) so
            # subsequent writers can attach, preserving header/level.
            fifo_filename = fd_to_logpipe[fd]
            header, level = loggers[fd]
            close_fifo(fd)
            open_fifo(fifo_filename, header, level)

        for (header, level), fifo_filename in self.log_fifo_filenames.items():
            open_fifo(fifo_filename, header, level)

        while True:
            # Python poll() doesn't return when SIGCHLD is received;
            # and there's the freak case where a process first
            # terminates stdout/stderr, then trying to write to a log
            # pipe, so we should track child termination the proper
            # way. Being in Python, it's easiest to just poll every
            # 50 ms; the majority of the time is spent in poll() so
            # it doesn't really increase log message latency
            events = poller.poll(50)
            if len(events) == 0:
                # NOTE(review): termination is only checked on an idle poll;
                # a FIFO that keeps raising POLLHUP could delay the break —
                # confirm this cannot starve after child exit.
                if proc.poll() is not None:
                    break # child terminated
            for fd, reason in events:
                if reason & select.POLLHUP and not (reason & select.POLLIN):
                    # we want to continue receiving POLLHUP|POLLIN until all
                    # is read
                    if fd in fd_to_logpipe:
                        reopen_fifo(fd)
                    elif fd in (stdout_fd, stderr_fd):
                        poller.unregister(fd)
                elif reason & select.POLLIN:
                    if stdout_to is not None and fd == stdout_fd:
                        # Just forward
                        buf = os.read(fd, LOG_PIPE_BUFSIZE)
                        stdout_to.write(buf)
                    else:
                        # append new bytes to what's already been read on this fd; and
                        # emit any completed lines
                        new_bytes = os.read(fd, LOG_PIPE_BUFSIZE)
                        assert new_bytes != '' # after all, we did poll
                        buffers[fd] += new_bytes
                        lines = buffers[fd].splitlines(True) # keepends=True
                        if lines[-1][-1] != '\n':
                            # last line incomplete; keep it buffered
                            buffers[fd] = lines[-1]
                            del lines[-1]
                        else:
                            buffers[fd] = ''
                        # have list of lines, emit them to logger
                        header, level = loggers[fd]
                        for line in lines:
                            line = line.rstrip('\n')
                            with sublevel_added(logger, header):
                                logger.log(level, line)

        flush_buffer(stderr_fd)
        flush_buffer(stdout_fd)
        # close_fifo() deletes from fd_to_logpipe, so iterate over a snapshot
        # of the keys to avoid mutating the dict while iterating it
        # (RuntimeError on Python 3).
        for fd in list(fd_to_logpipe):
            close_fifo(fd)

        retcode = proc.wait()
        return retcode