Example #1
    def __init__(self, rpipe, wpipe=None, auto_close=False):
        """Pipe-based stream

        NOTE: reading from or writing to files, use os.open to get the file
        descriptor instead of python's open. Socket file descriptors and
        others are fine.

        when you use os.pipe to generate one write pipe and one read pipe, you
        need to pass both of them into init method.

        :param rpipe: an integer file descriptor which supports read ops
        :param wpipe: an integer file descriptor which supports write ops
        :param auto: flag to indicate to close the stream automatically or not
        """
        assert rpipe is not None
        self._rpipe = rpipe
        self._wpipe = wpipe

        self._rs = (PipeIOStream(self._rpipe)
                    if self._rpipe is not None else None)
        self._ws = (PipeIOStream(self._wpipe)
                    if self._wpipe is not None else None)
        self.auto_close = auto_close
        self.state = StreamState.init

        self.exception = None
Example #2
 def __enter__(self):
     # Create the file if needed and open it for writing in one call,
     # so the descriptor returned by os.open is not leaked.
     fd = os.open(self.fname, os.O_WRONLY | os.O_CREAT)
     self.stream = PipeIOStream(fd)
     return self.stream
Example #3
 def __init__(self, *args, **kwargs):
     self.io_loop = kwargs.pop('io_loop', None)
     to_close = []
     if kwargs.get('stdin') is Subprocess.STREAM:
         in_r, in_w = os.pipe()
         kwargs['stdin'] = in_r
         to_close.append(in_r)
         self.stdin = PipeIOStream(in_w, io_loop=self.io_loop)
     if kwargs.get('stdout') is Subprocess.STREAM:
         out_r, out_w = os.pipe()
         kwargs['stdout'] = out_w
         to_close.append(out_w)
         self.stdout = PipeIOStream(out_r, io_loop=self.io_loop)
     if kwargs.get('stderr') is Subprocess.STREAM:
         err_r, err_w = os.pipe()
         kwargs['stderr'] = err_w
         to_close.append(err_w)
         self.stderr = PipeIOStream(err_r, io_loop=self.io_loop)
     self.proc = subprocess.Popen(*args, **kwargs)
     for fd in to_close:
         os.close(fd)
     for attr in ['stdin', 'stdout', 'stderr', 'pid']:
         if not hasattr(self, attr):  # don't clobber streams set above
             setattr(self, attr, getattr(self.proc, attr))
     self._exit_callback = None
     self.returncode = None
Example #4
    @gen.coroutine  # the body below yields, so it must run as a coroutine
    def call_process(self, cmd, stream, address, io_loop=None):
        """Run a subprocess and relay its stdout to `stream`.

        :param cmd: command as a list, e.g. ['ls', '-la']
        :param stream: stream that stdout chunks are written to
        :param address: identifier passed to the on_exit callback

        TODO: add some way of calling proc.kill() if the stream is closed
        """

        stdout_stream = Subprocess.STREAM 
        stderr_stream = Subprocess.STREAM 
        proc = Subprocess(cmd, stdout=stdout_stream, stderr=stderr_stream)
        call_back = partial(self.on_exit, address)
        proc.set_exit_callback(call_back)

        pipe_stream = PipeIOStream(proc.stdout.fileno())

        try:
            while True:
                str_ = yield pipe_stream.read_bytes(102400, partial=True)
                yield stream.write(str_)
        except StreamClosedError:
            pass
        print("end address: {}".format(address))
Example #5
class _ContentHandler(tornado.web.RequestHandler):
    SUPPORTED_METHODS = ['GET']

    @tornado.web.asynchronous
    @gen.coroutine
    def get(self, path):
        try:
            content = self.content(path)
            self.set_header('Content-Type', content.mime)
            if content.has_file() and os.name != 'nt':
                self.stream = PipeIOStream(content.open_fd())
                self.stream.read_until_close(callback=self.on_file_end,
                                             streaming_callback=self.on_chunk)
            else:
                self.finish(content.get_data())
        except NotAuthorizedError:
            self.write(exception_message())
            self.send_error(403)
        except FileNotFoundError:
            self.send_error(404)
        except:
            log.exception('error')
            self.send_error(500)

    def on_file_end(self, s):
        if s:
            self.write(s)
        self.finish()  # close connection

    def on_chunk(self, chunk):
        self.write(chunk)
        self.flush()
Example #6
 def __init__(self, stdin=sys.stdin, stdout=sys.stdout, context: dict = None):
     """
     Create a new shell.
     :param stdin: file handle of the standard input
     :param stdout: file handle of the standard output
     :param context: exposed variables
     """
     self.stdin = PipeIOStream(stdin.fileno())
     self.stdout = PipeIOStream(stdout.fileno())
     self.input_buffer = []
     self.running = False
     # avoid a shared mutable default argument
     self.context = context if context is not None else {}
Example #7
class WriteFileHandler(web.RequestHandler):
    @web.asynchronous
    def get(self):
        print('handler begin at %s' % datetime.now())
        self.f = open('test.data', 'w')
        self.stream = PipeIOStream(self.f.fileno())
        self.stream.write(test_data, self.callback)
        print('handler async write at %s' % datetime.now())

    def callback(self):
        self.f.close()
        self.finish()
        print('handler complete at %s' % datetime.now())
Example #9
    def test_pipe_iostream_big_write(self):
        r, w = os.pipe()

        rs = PipeIOStream(r, io_loop=self.io_loop)
        ws = PipeIOStream(w, io_loop=self.io_loop)

        NUM_BYTES = 1048576

        # Write 1MB of data, which should fill the buffer
        ws.write(b"1" * NUM_BYTES)

        rs.read_bytes(NUM_BYTES, self.stop)
        data = self.wait()
        self.assertEqual(data, b"1" * NUM_BYTES)

        ws.close()
        rs.close()
Example #11
class SafeFileIOStream:
    def __init__(self, fname):
        self.fname = fname

        files_dir = os.path.dirname(fname)
        if not os.path.exists(files_dir):
            os.mkdir(files_dir)

    def __enter__(self):
        # Create the file if needed and open it for writing in one call,
        # so the descriptor returned by os.open is not leaked.
        fd = os.open(self.fname, os.O_WRONLY | os.O_CREAT)
        self.stream = PipeIOStream(fd)
        return self.stream

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Close stream
        self.stream.close()
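A minimal usage sketch for the context manager above; the path and payload are hypothetical, and the write is yielded inside a coroutine so it completes before __exit__ closes the stream (assumes tornado.gen is available):

    @tornado.gen.coroutine
    def save_payload(payload):
        # Hypothetical helper built on the SafeFileIOStream example above.
        with SafeFileIOStream('files/payload.bin') as stream:
            yield stream.write(payload)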
Example #12
 def __init__(self, *args: Any, **kwargs: Any) -> None:
     self.io_loop = ioloop.IOLoop.current()
     # All FDs we create should be closed on error; those in to_close
     # should be closed in the parent process on success.
     pipe_fds = []  # type: List[int]
     to_close = []  # type: List[int]
     if kwargs.get("stdin") is Subprocess.STREAM:
         in_r, in_w = _pipe_cloexec()
         kwargs["stdin"] = in_r
         pipe_fds.extend((in_r, in_w))
         to_close.append(in_r)
         self.stdin = PipeIOStream(in_w)
     if kwargs.get("stdout") is Subprocess.STREAM:
         out_r, out_w = _pipe_cloexec()
         kwargs["stdout"] = out_w
         pipe_fds.extend((out_r, out_w))
         to_close.append(out_w)
         self.stdout = PipeIOStream(out_r)
     if kwargs.get("stderr") is Subprocess.STREAM:
         err_r, err_w = _pipe_cloexec()
         kwargs["stderr"] = err_w
         pipe_fds.extend((err_r, err_w))
         to_close.append(err_w)
         self.stderr = PipeIOStream(err_r)
     try:
         self.proc = subprocess.Popen(*args, **kwargs)
     except:
         for fd in pipe_fds:
             os.close(fd)
         raise
     for fd in to_close:
         os.close(fd)
     self.pid = self.proc.pid
     for attr in ["stdin", "stdout", "stderr"]:
         if not hasattr(self, attr):  # don't clobber streams set above
             setattr(self, attr, getattr(self.proc, attr))
     self._exit_callback = None  # type: Optional[Callable[[int], None]]
     self.returncode = None  # type: Optional[int]
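For context, a small sketch of how the STREAM sentinel handled by this constructor is typically consumed; the helper name and command are illustrative, not part of Tornado itself.

    from tornado import gen
    from tornado.process import Subprocess

    @gen.coroutine
    def run_and_capture(cmd):
        # Illustrative: collect all stdout and stderr from a child process.
        proc = Subprocess(cmd, stdout=Subprocess.STREAM, stderr=Subprocess.STREAM)
        out, err = yield [proc.stdout.read_until_close(),
                          proc.stderr.read_until_close()]
        raise gen.Return((out, err))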
Example #13
 def __init__(self, *args, **kwargs):
     self.io_loop = kwargs.pop('io_loop', None) or ioloop.IOLoop.current()
     # All FDs we create should be closed on error; those in to_close
     # should be closed in the parent process on success.
     pipe_fds = []
     to_close = []
     if kwargs.get('stdin') is Subprocess.STREAM:
         in_r, in_w = _pipe_cloexec()
         kwargs['stdin'] = in_r
         pipe_fds.extend((in_r, in_w))
         to_close.append(in_r)
         self.stdin = PipeIOStream(in_w, io_loop=self.io_loop)
     if kwargs.get('stdout') is Subprocess.STREAM:
         out_r, out_w = _pipe_cloexec()
         kwargs['stdout'] = out_w
         pipe_fds.extend((out_r, out_w))
         to_close.append(out_w)
         self.stdout = PipeIOStream(out_r, io_loop=self.io_loop)
     if kwargs.get('stderr') is Subprocess.STREAM:
         err_r, err_w = _pipe_cloexec()
         kwargs['stderr'] = err_w
         pipe_fds.extend((err_r, err_w))
         to_close.append(err_w)
         self.stderr = PipeIOStream(err_r, io_loop=self.io_loop)
     try:
         self.proc = subprocess.Popen(*args, **kwargs)
     except:
         for fd in pipe_fds:
             os.close(fd)
         raise
     for fd in to_close:
         os.close(fd)
     for attr in ['stdin', 'stdout', 'stderr', 'pid']:
         if not hasattr(self, attr):  # don't clobber streams set above
             setattr(self, attr, getattr(self.proc, attr))
     self._exit_callback = None
     self.returncode = None
Example #15
class Shell(object):
    def __init__(self, stdin=sys.stdin, stdout=sys.stdout, context: dict = None):
        """
        Create a new shell.

        :param stdin: file handle of the standard input
        :param stdout: file handle of the standard output
        :param context: exposed variables
        """
        self.stdin = PipeIOStream(stdin.fileno())
        self.stdout = PipeIOStream(stdout.fileno())
        self.input_buffer = []
        self.running = False
        # avoid a shared mutable default argument
        self.context = context if context is not None else {}

    def start(self):
        self.running = True
        self.stdout.write(b"\r$>")
        self.stdin.read_until(b'\n', self.on_line)

    def on_line(self, chunk_bytes: bytes):
        chunk = chunk_bytes.decode('utf-8', errors='ignore').rstrip('\n')
        if not chunk.endswith('\\'):
            self.input_buffer.append(chunk.strip())
            line = " ".join(self.input_buffer)
            self.input_buffer.clear()
            self.on_command(line)
        else:
            self.input_buffer.append(chunk[:-1].strip())
            self.stdout.write(b"\r  ")
        if self.running:
            self.start()

    @tornado.gen.engine
    def on_command(self, command):
        try:
            if command:
                code = compile(command + '\n', '<shell>', 'single')
                res = eval(code, self.context)
                if res is not None:
                    r = pprint.pformat(res).encode('utf-8')
                    yield tornado.gen.Task(self.stdout.write, r + b'\n')
        except SystemExit:
            raise
        except:
            yield tornado.gen.Task(self.stdout.write, traceback.format_exc().encode('utf-8'))
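A sketch of how the Shell above might be driven; the context value is illustrative, and the callback-style read_until it relies on assumes a pre-6.0 Tornado with tornado.ioloop imported:

    if __name__ == '__main__':
        shell = Shell(context={'answer': 42})
        loop = tornado.ioloop.IOLoop.current()
        loop.add_callback(shell.start)  # print the prompt and begin reading stdin
        loop.start()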
Example #17
    def test_pipe_iostream(self):
        r, w = os.pipe()

        rs = PipeIOStream(r, io_loop=self.io_loop)
        ws = PipeIOStream(w, io_loop=self.io_loop)

        ws.write(b"hel")
        ws.write(b"lo world")

        rs.read_until(b' ', callback=self.stop)
        data = self.wait()
        self.assertEqual(data, b"hello ")

        rs.read_bytes(3, self.stop)
        data = self.wait()
        self.assertEqual(data, b"wor")

        ws.close()

        rs.read_until_close(self.stop)
        data = self.wait()
        self.assertEqual(data, b"ld")

        rs.close()
Example #18
    def __init__(self,
                 command,
                 timeout=-1,
                 stdout_chunk_callback=None,
                 stderr_chunk_callback=None,
                 exit_process_callback=None,
                 stdin_bytes=None,
                 io_loop=None,
                 kill_on_timeout=False):
        """
        Initializes the subprocess with callbacks and timeout.

        :param command: command like ['java', '-jar', 'test.jar']
        :param timeout: timeout for the subprocess to complete; no timeout if negative or zero
        :param stdout_chunk_callback: callback(bytes_data_chunk_from_stdout)
        :param stderr_chunk_callback: callback(bytes_data_chunk_from_stderr)
        :param exit_process_callback: callback(exit_code, was_expired_by_timeout)
        :param stdin_bytes: bytes data to send to stdin
        :param io_loop: tornado io loop, or None for the current one
        :param kill_on_timeout: kill(-9) or terminate(-15)?
        """
        self.aa_exit_process_callback = exit_process_callback
        self.aa_kill_on_timeout = kill_on_timeout
        stdin = Subprocess.STREAM if stdin_bytes else None
        stdout = Subprocess.STREAM if stdout_chunk_callback else None
        stderr = Subprocess.STREAM if stderr_chunk_callback else None

        Subprocess.__init__(self,
                            command,
                            stdin=stdin,
                            stdout=stdout,
                            stderr=stderr,
                            io_loop=io_loop,
                            shell=True)

        self.aa_process_expired = False
        self.aa_terminate_timeout = self.io_loop.call_later(
            timeout, self.aa_timeout_callback) if timeout > 0 else None

        self.set_exit_callback(self.aa_exit_callback)

        if stdin:
            self.stdin.write(stdin_bytes)
            self.stdin.close()

        if stdout:
            output_stream = PipeIOStream(self.stdout.fileno())

            def on_stdout_chunk(data):
                stdout_chunk_callback(data)
                if not output_stream.closed():
                    output_stream.read_bytes(102400, on_stdout_chunk, None,
                                             True)

            output_stream.read_bytes(102400, on_stdout_chunk, None, True)

        if stderr:
            stderr_stream = PipeIOStream(self.stderr.fileno())

            def on_stderr_chunk(data):
                stderr_chunk_callback(data)
                if not stderr_stream.closed():
                    stderr_stream.read_bytes(102400, on_stderr_chunk, None,
                                             True)

            stderr_stream.read_bytes(102400, on_stderr_chunk, None, True)
Example #19
 def _create_streams(self):
     read_fd, write_fd = os.pipe()
     write_stream = PipeIOStream(write_fd, io_loop=self.io_loop)
     read_stream = PipeIOStream(read_fd, io_loop=self.io_loop)
     return (write_stream, read_stream)
Example #20
    def __init__(
                self,
                command,
                timeout=-1,
                stdout_chunk_callback=None,
                stderr_chunk_callback=None,
                exit_process_callback=None,
                stdin_bytes=None,
                io_loop=None,
                kill_on_timeout=False
                ):
        """
        Initializes the subprocess with callbacks and timeout.

        :param command: command like ['java', '-jar', 'test.jar']
        :param timeout: timeout for the subprocess to complete; no timeout \
        if negative or zero
        :param stdout_chunk_callback: callback(bytes_data_chunk_from_stdout)
        :param stderr_chunk_callback: callback(bytes_data_chunk_from_stderr)
        :param exit_process_callback: callback(exit_code, \
        was_expired_by_timeout)
        :param stdin_bytes: bytes data to send to stdin
        :param io_loop: tornado io loop, or None for the current one
        :param kill_on_timeout: kill(-9) or terminate(-15)?
        """
        self.aa_exit_process_callback = exit_process_callback
        self.aa_kill_on_timeout = kill_on_timeout
        stdin = Subprocess.STREAM if stdin_bytes else None
        stdout = Subprocess.STREAM if stdout_chunk_callback else None
        stderr = Subprocess.STREAM if stderr_chunk_callback else None

        Subprocess.__init__(self, command, stdin=stdin, stdout=stdout,
                            stderr=stderr, io_loop=io_loop)

        self.aa_process_expired = False
        self.aa_terminate_timeout = self.io_loop.call_later(
            timeout, self.aa_timeout_callback) if timeout > 0 else None

        self.set_exit_callback(self.aa_exit_callback)

        if stdin:
            self.stdin.write(stdin_bytes)
            self.stdin.close()

        if stdout:
            output_stream = PipeIOStream(self.stdout.fileno())

            def on_stdout_chunk(data):
                stdout_chunk_callback(data)
                if not output_stream.closed():
                    output_stream.read_bytes(102400,
                                             on_stdout_chunk, None, True)

            output_stream.read_bytes(102400, on_stdout_chunk, None, True)

        if stderr:
            stderr_stream = PipeIOStream(self.stderr.fileno())

            def on_stderr_chunk(data):
                stderr_chunk_callback(data)
                if not stderr_stream.closed():
                    stderr_stream.read_bytes(102400,
                                             on_stderr_chunk, None, True)

            stderr_stream.read_bytes(102400, on_stderr_chunk, None, True)
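A hedged usage sketch for the wrapper above. Its class name is not shown in the snippet, so CallbackSubprocess is only a placeholder, and the command and callbacks are illustrative.

    def on_out(chunk):
        # stdout arrives as raw bytes chunks
        print(chunk.decode(errors='replace'), end='')

    def on_exit(exit_code, expired):
        print('exited with', exit_code, '(timed out)' if expired else '')

    CallbackSubprocess(['ping', '-c', '3', 'localhost'],
                       timeout=10,
                       stdout_chunk_callback=on_out,
                       exit_process_callback=on_exit)
    tornado.ioloop.IOLoop.current().start()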
Example #22
    def make_iostream_pair(self, **kwargs):
        r, w = os.pipe()

        return PipeIOStream(r, **kwargs), PipeIOStream(w, **kwargs)
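The helper returns a connected (read, write) stream pair backed by an anonymous pipe; a small illustrative use in the same stop/wait style as the test cases above:

    rs, ws = self.make_iostream_pair()
    ws.write(b"ping")
    rs.read_bytes(4, self.stop)        # callback-style read, as in the tests above
    self.assertEqual(self.wait(), b"ping")
    ws.close()
    rs.close()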
Example #24
class PipeStream(Stream):
    def __init__(self, rpipe, wpipe=None, auto_close=False):
        """Pipe-based stream

        NOTE: reading from or writing to files, use os.open to get the file
        descriptor instead of python's open. Socket file descriptors and
        others are fine.

        when you use os.pipe to generate one write pipe and one read pipe, you
        need to pass both of them into init method.

        :param rpipe: an integer file descriptor which supports read ops
        :param wpipe: an integer file descriptor which supports write ops
        :param auto: flag to indicate to close the stream automatically or not
        """
        assert rpipe is not None
        self._rpipe = rpipe
        self._wpipe = wpipe

        self._rs = PipeIOStream(self._rpipe) if self._rpipe is not None else None
        self._ws = PipeIOStream(self._wpipe) if self._wpipe is not None else None
        self.auto_close = auto_close
        self.state = StreamState.init

        self.exception = None

    @tornado.gen.coroutine
    def read(self):
        if self.exception:
            raise self.exception

        if self.state == StreamState.completed or self._rpipe is None:
            raise tornado.gen.Return("")
        elif self.state == StreamState.init:
            self.state = StreamState.streaming

        chunk = ""
        try:
            chunk = yield self._rs.read_bytes(common.MAX_PAYLOAD_SIZE, partial=True)

        except StreamClosedError:
            # reach the end of the pipe stream
            self.state = StreamState.completed
        finally:
            if self.exception:
                raise self.exception
            raise tornado.gen.Return(chunk)

    @tornado.gen.coroutine
    def write(self, chunk):
        assert self._wpipe is not None
        if self.exception:
            raise self.exception

        try:
            yield self._ws.write(chunk)
            self.state = StreamState.streaming
        except StreamClosedError:
            self.state = StreamState.completed
            raise UnexpectedError("Stream has been closed.")
        finally:
            if self.exception:
                raise self.exception

    def set_exception(self, exception):
        self.exception = exception
        self.close()

    def close(self):
        self.state = StreamState.completed
        if self._ws and self.auto_close:
            self._ws.close()

        if self._rs and self.auto_close:
            self._rs.close()
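Finally, a usage sketch for the PipeStream class above, following its docstring's note that both descriptors from os.pipe go to __init__; the coroutine name and payload are illustrative, and the surrounding project's imports (os, tornado.gen) are assumed:

    @tornado.gen.coroutine
    def pipe_roundtrip():
        # Illustrative only: create both ends with os.pipe and hand them to PipeStream.
        read_fd, write_fd = os.pipe()
        stream = PipeStream(read_fd, write_fd, auto_close=True)
        yield stream.write(b"hello")
        chunk = yield stream.read()    # a partial read returns the bytes written above
        stream.close()                 # auto_close=True closes both underlying streams
        raise tornado.gen.Return(chunk)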