Example #1
 def __init__(self,
              fs,
              fsname="NTFS",
              volname="Dokan Volume",
              securityfolder=os.path.expanduser('~')):
     if libdokan is None:
         msg = 'dokan library (http://dokan-dev.github.io/) is not available'
         raise OSError(msg)
     self.fs = fs
     self.fsname = fsname
     self.volname = volname
     self.securityfolder = securityfolder
     self._files_by_handle = {}
     self._files_lock = threading.Lock()
     self._next_handle = MIN_FH
     #  Windows requires us to implement a kind of "lazy deletion", where
     #  a handle is marked for deletion but this is not actually done
     #  until the handle is closed.  This set monitors pending deletes.
     self._pending_delete = set()
     #  Since pyfilesystem has no locking API, we manage file locks
     #  in memory.  This maps paths to a list of current locks.
     self._active_locks = PathMap()
     #  Dokan expects a successful write() to be reflected in the file's
     #  reported size, but the FS might buffer writes and prevent this.
     #  We explicitly keep track of the size Dokan expects a file to be.
     #  This dict is indexed by path, then file handle.
     self._files_size_written = PathMap()
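
The "lazy deletion" bookkeeping described in the comments above boils down to a mark-then-remove pattern: deletion only records the path, and the real removal happens when the last handle is closed. A minimal, self-contained sketch of that pattern follows; the class and method names are made up for illustration and are not the module's actual Dokan handlers.

class LazyDeleteSketch(object):
    """Toy illustration of the pending-delete set, not the module's API."""

    def __init__(self, fs):
        self.fs = fs
        self._pending_delete = set()

    def delete_file(self, path):
        #  Windows semantics: only mark the path; nothing is removed yet.
        self._pending_delete.add(path)

    def cleanup(self, path):
        #  Called once the last handle on the path is closed: do the real
        #  delete, then forget the pending entry.
        if path in self._pending_delete:
            self.fs.remove(path)
            self._pending_delete.discard(path)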
Example #2
 def __init__(self, fs, on_init=None, on_destroy=None):
     self.fs = fs
     self._on_init = on_init
     self._on_destroy = on_destroy
     self._files_by_handle = {}
     self._files_lock = threading.Lock()
     self._next_handle = 1
     #  FUSE expects a successful write() to be reflected in the file's
     #  reported size, but the FS might buffer writes and prevent this.
     #  We explicitly keep track of the size FUSE expects a file to be.
     #  This dict is indexed by path, then file handle.
     self._files_size_written = {}
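
The size-tracking comment above (the same idea appears in the Dokan example) can be illustrated with a small, self-contained sketch. The method names here are invented for illustration; only the shape of the path-then-handle map matches the excerpt.

class SizeWrittenSketch(object):
    """Toy model of the per-handle 'size written' map described above."""

    def __init__(self, fs):
        self.fs = fs
        self._files_size_written = {}   # path -> {file handle: bytes written}

    def record_write(self, path, fh, offset, data):
        #  After a successful write(), remember the furthest byte written
        #  through this handle.
        written = offset + len(data)
        handles = self._files_size_written.setdefault(path, {})
        if written > handles.get(fh, 0):
            handles[fh] = written

    def reported_size(self, path):
        #  Never report less than a handle has already written, even if the
        #  underlying FS has buffered the data and getsize() lags behind.
        sizes = self._files_size_written.get(path, {})
        return max([self.fs.getsize(path)] + list(sizes.values()))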
Example #3
 def _reg_file(self, f, path):
     self._files_lock.acquire()
     try:
         fh = self._next_handle
         self._next_handle += 1
         lock = threading.Lock()
         self._files_by_handle[fh] = (f, path, lock)
         if path not in self._files_size_written:
             self._files_size_written[path] = {}
         self._files_size_written[path][fh] = 0
         return fh
     finally:
         self._files_lock.release()
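
One would expect a companion helper that unregisters a handle and prunes its size-written entry under the same lock, so the two maps stay consistent. A sketch of such a helper, assuming the attributes set up in the __init__ excerpts above (the name _del_file is a guess, not necessarily the module's own):

 def _del_file(self, fh):
     #  Drop the handle and its size-written entry under the same lock
     #  that _reg_file uses.
     with self._files_lock:
         (f, path, lock) = self._files_by_handle.pop(fh)
         sizes = self._files_size_written.get(path)
         if sizes is not None:
             sizes.pop(fh, None)
             if not sizes:
                 del self._files_size_written[path]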
Example #4
 def __init__(self, fs, max_size):
     super(LimitSizeFS, self).__init__(fs)
     if max_size < 0:
         try:
             max_size = fs.getmeta("total_space") + max_size
         except NoMetaError:
             msg = "FS doesn't report total_size; "\
                   "can't use negative max_size"
             raise ValueError(msg)
     self.max_size = max_size
     self._size_lock = threading.Lock()
     self._file_sizes = PathMap()
     self.cur_size = self._get_cur_size()
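
Usage-wise, a negative max_size is interpreted as "total_space minus this much", which only works if the wrapped filesystem reports "total_space" in its meta; otherwise the constructor above raises ValueError. A short usage sketch, assuming the pyfilesystem 0.x module layout (fs.memoryfs, fs.wrapfs.limitsizefs):

from fs.memoryfs import MemoryFS
from fs.wrapfs.limitsizefs import LimitSizeFS

limited = LimitSizeFS(MemoryFS(), 1024 * 1024)    # hard cap of 1 MiB
# limited = LimitSizeFS(some_fs, -1024 * 1024)    # total_space minus 1 MiB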
Example #5
    return wrapper


# During long-running operations, Dokan requires that the DokanResetTimeout
# function be called periodically to indicate that progress is still being
# made.  Unfortunately we don't have any facility for the underlying FS
# to make these calls for us, so we have to hack around it.
#
# The idea is to use a single background thread to monitor all active Dokan
# method calls, resetting the timeout until they have completed.  Note that
# this completely undermines the point of DokanResetTimeout as it's now
# possible for a deadlock to hang the entire filesystem.

_TIMEOUT_PROTECT_THREAD = None
_TIMEOUT_PROTECT_LOCK = threading.Lock()
_TIMEOUT_PROTECT_COND = threading.Condition(_TIMEOUT_PROTECT_LOCK)
_TIMEOUT_PROTECT_QUEUE = deque()
_TIMEOUT_PROTECT_WAIT_TIME = 4 * 60
_TIMEOUT_PROTECT_RESET_TIME = 5 * 60 * 1000


def _start_timeout_protect_thread():
    """Start the background thread used to protect dokan from timeouts.

    This function starts the background thread that monitors calls into the
    dokan API and resets their timeouts.  It's safe to call this more than
    once; only a single thread will be started.
    """
    global _TIMEOUT_PROTECT_THREAD
    with _TIMEOUT_PROTECT_LOCK:
        if _TIMEOUT_PROTECT_THREAD is None:
            #  Only one monitor thread is ever started; it runs as a daemon
            #  so it doesn't keep the process alive on shutdown.
            #  _run_timeout_protect_thread is the monitor loop (sketched below).
            target = _run_timeout_protect_thread
            _TIMEOUT_PROTECT_THREAD = threading.Thread(target=target)
            _TIMEOUT_PROTECT_THREAD.daemon = True
            _TIMEOUT_PROTECT_THREAD.start()
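
A rough, illustrative sketch of the monitor loop itself. The queue-entry format and the "finished" flag handling are assumptions for illustration, not the library's exact code; the wrapped Dokan call is assumed to append an entry, notify the condition, and set its flag when it returns. DokanResetTimeout is the underlying Dokan API call exposed through the ctypes binding (libdokan).

import time

def _run_timeout_protect_thread():
    #  Entries are assumed to be (start_time, dokan_file_info, finished_flag),
    #  where finished_flag is a one-element list set to [True] by the wrapped
    #  call when it completes.
    while True:
        with _TIMEOUT_PROTECT_COND:
            while not _TIMEOUT_PROTECT_QUEUE:
                _TIMEOUT_PROTECT_COND.wait()
            (started, info, finished) = _TIMEOUT_PROTECT_QUEUE.popleft()
        if finished[0]:
            continue                       # the call already returned
        #  Sleep until the call approaches the Dokan timeout, then reset it
        #  and re-queue the entry so it stays protected.
        time.sleep(max(0, _TIMEOUT_PROTECT_WAIT_TIME - (time.time() - started)))
        with _TIMEOUT_PROTECT_COND:
            if not finished[0]:
                libdokan.DokanResetTimeout(_TIMEOUT_PROTECT_RESET_TIME, info)
                _TIMEOUT_PROTECT_QUEUE.append((time.time(), info, finished))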
Example #6
 def __setstate__(self, state):
     super(LimitSizeFS, self).__setstate__(state)
     self._size_lock = threading.Lock()
     self._file_sizes = PathMap()
     self.cur_size = self._get_cur_size()
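
The attributes rebuilt above are exactly the ones that cannot survive pickling: a lock and cached size data. Presumably a matching __getstate__ drops them before pickling; a sketch of what that would look like (an assumption, not the actual library code):

 def __getstate__(self):
     state = super(LimitSizeFS, self).__getstate__()
     #  Locks can't be pickled, and the cached sizes can be recomputed from
     #  the wrapped FS on unpickling (as __setstate__ above does).
     state.pop('_size_lock', None)
     state.pop('_file_sizes', None)
     state.pop('cur_size', None)
     return state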
Example #7
 def __setstate__(self, state):
     super(LimitSizeFS, self).__setstate__(state)
     self._size_lock = threading.Lock()
Example #8
 def __init__(self, fs, max_size):
     super(LimitSizeFS, self).__init__(fs)
     self.max_size = max_size
     self.cur_size = sum(self.getsize(f) for f in self.walkfiles())
     self._size_lock = threading.Lock()
     self._file_sizes = {}