def _close(self, async_=False):
    # We allow double-close without signaling an error, because real
    # Python file objects do. However, we must protect against actually
    # sending multiple CMD_CLOSE packets, because after we close our
    # handle, the same handle may be re-allocated by the server, and we
    # may end up mysteriously closing some random other file. (This is
    # especially important because we unconditionally call close() from
    # __del__.)
    if self._closed:
        return
    self.sftp._log(DEBUG, "close({})".format(u(hexlify(self.handle))))
    if self.pipelined:
        self.sftp._finish_responses(self)
    BufferedFile.close(self)
    try:
        if async_:
            # GC'd file handle could be called from an arbitrary thread
            # -- don't wait for a response
            self.sftp._async_request(type(None), CMD_CLOSE, self.handle)
        else:
            self.sftp._request(CMD_CLOSE, self.handle)
    except EOFError:
        # may have outlived the Transport connection
        pass
    except (IOError, socket.error):
        # may have outlived the Transport connection
        pass
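# Usage sketch (assumptions: the host "example.com", the credentials, and the
# remote path "/tmp/example.txt" are placeholders, not part of this module).
# It exercises the double-close guard in _close() above: the second close()
# and the implicit close during garbage collection send no further CMD_CLOSE
# packets because self._closed is already set.
import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect("example.com", username="user", password="secret")
sftp = client.open_sftp()

f = sftp.open("/tmp/example.txt", "w")
f.write(b"hello\n")
f.close()
f.close()  # safe: _closed is already True, so no second CMD_CLOSE is sent
del f      # on CPython this runs __del__ -> _close(async_=True), also a no-op

sftp.close()
client.close()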
def close(self):
    """
    Close this channel file. The corresponding channel direction is shut
    down as well.
    """
    BufferedFile.close(self)
    if self._flags & self.FLAG_WRITE:
        self.channel.shutdown_write()
    if self._flags & self.FLAG_READ:
        self.channel.shutdown_read()
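# Usage sketch (assumptions: host "example.com" and the credentials are
# placeholders). Closing the stdin file returned by exec_command shuts down
# the write side of the underlying channel, which is how the remote "cat"
# process learns that its input is complete.
import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect("example.com", username="user", password="secret")

stdin, stdout, stderr = client.exec_command("cat")
stdin.write(b"hello from the client\n")
stdin.close()  # FLAG_WRITE is set, so this calls channel.shutdown_write()
print(stdout.read().decode())  # remote cat echoes the input, then sees EOF

client.close()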
class SFTPFile(BufferedFile):
    """
    Proxy object for a file on the remote server, in client mode SFTP.

    Instances of this class may be used as context managers in the same way
    that built-in Python file objects are.
    """

    # Some sftp servers will choke if you send read/write requests larger than
    # this size.
    MAX_REQUEST_SIZE = 32768

    def __init__(self, sftp, handle, mode='r', bufsize=-1):
        BufferedFile.__init__(self)
        self.sftp = sftp
        self.handle = handle
        BufferedFile._set_mode(self, mode, bufsize)
        self.pipelined = False
        self._prefetching = False
        self._prefetch_done = False
        self._prefetch_data = {}
        self._prefetch_extents = {}
        self._prefetch_lock = threading.Lock()
        self._saved_exception = None
        self._reqs = deque()

    def __del__(self):
        self._close(async_=True)

    def close(self):
        self._close(async_=False)

    def _close(self, async_=False):
        # We allow double-close without signaling an error, because real
        # Python file objects do. However, we must protect against actually
        # sending multiple CMD_CLOSE packets, because after we close our
        # handle, the same handle may be re-allocated by the server, and we
        # may end up mysteriously closing some random other file. (This is
        # especially important because we unconditionally call close() from
        # __del__.)
        if self._closed:
            return
        self.sftp._log(DEBUG, 'close(%s)' % hexlify(self.handle))
        if self.pipelined:
            self.sftp._finish_responses(self)
        BufferedFile.close(self)
        try:
            if async_:
                # GC'd file handle could be called from an arbitrary thread
                # -- don't wait for a response
                self.sftp._async_request(type(None), CMD_CLOSE, self.handle)
            else:
                self.sftp._request(CMD_CLOSE, self.handle)
        except EOFError:
            # may have outlived the Transport connection
            pass
        except (IOError, socket.error):
            # may have outlived the Transport connection
            pass
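# Context-manager sketch (assumptions: the connection details and the remote
# path "/tmp/example.txt" are placeholders; prefetch() is a paramiko SFTPFile
# method not shown in this excerpt). As the class docstring notes, SFTPFile
# instances can be used as context managers like built-in file objects;
# leaving the "with" block closes the remote handle.
import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect("example.com", username="user", password="secret")
sftp = client.open_sftp()

with sftp.open("/tmp/example.txt", "r") as f:
    f.prefetch()     # pipeline read requests for the whole file up front
    data = f.read()  # served from the prefetched data as responses arrive

sftp.close()
client.close()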