def main():
    """Mount ddrescuefs, reserving stderr lines below the log for ddrescue output."""
    options = parse_args()
    # Reserve `ddrescue_pollution` blank lines of scroll space on stderr.
    sys.stderr.write('\n' * ddrescue_pollution)
    sys.stderr.flush()
    # ANSI escapes: cursor-up one line, and clear to end of line.
    term_up = '\x1B[A'
    clear_to_eol = '\x1B[K'
    # Each log record first moves the cursor up over the reserved lines,
    # prints the record, clears leftovers, then restores the blank lines.
    logging.basicConfig(
        format='\r' + (term_up * ddrescue_pollution) +
        '%(asctime)-23s %(levelname)-7s %(name)s: %(message)s' +
        clear_to_eol + ('\n' * ddrescue_pollution))
    if options.debug:
        logging.getLogger().setLevel(logging.DEBUG)
        logging.getLogger().debug('Debug logging enabled')
    else:
        logging.getLogger().setLevel(logging.INFO)
    fs = DDRescueFS(options)
    fuse_options = set(llfuse.default_options)
    fuse_options.add('fsname=ddrescuefs')
    if options.debug_fuse:
        fuse_options.add('debug')
    llfuse.init(fs, options.mountpoint, fuse_options)
    try:
        llfuse.main(workers=1)
    except:
        # On a crash, release llfuse without unmounting, then re-raise.
        llfuse.close(unmount=False)
        raise
    llfuse.close()
def mount(mountpoint, file_mappings, background=True):
    """Mount a read-only DXFuse filesystem of *file_mappings* at *mountpoint*."""
    filesystem = DXFuse(file_mappings)
    mount_opts = [
        b'fsname=dxfuse',
        b'subtype=dnanexus',
        b'ro'
    ]
    llfuse.init(filesystem, mountpoint, mount_opts)
    if background:
        # Detach from the controlling terminal before entering the FUSE loop.
        daemonize('/')
    llfuse.main()
    llfuse.close()
def _llfuse_main(self):
    """Drive the llfuse request loop; close the session in all cases."""
    try:
        llfuse.main()
    except:
        # Crash path: release llfuse resources but skip the unmount, then re-raise.
        llfuse.close(unmount=False)
        raise
    # Clean exit: full close (including unmount).
    llfuse.close()
def mount(self, mountpoint, mount_options, foreground=False):
    """Mount filesystem on *mountpoint* with *mount_options*."""
    options = ['fsname=borgfs', 'ro']
    if mount_options:
        options.extend(mount_options.split(','))
    # Strip our own pseudo-options before passing the rest to llfuse.
    try:
        options.remove('allow_damaged_files')
        self.allow_damaged_files = True
    except ValueError:
        pass
    try:
        options.remove('versions')
        self.versions = True
    except ValueError:
        pass
    self._create_filesystem()
    llfuse.init(self, mountpoint, options)
    if not foreground:
        daemonize()

    # If the file system crashes, we do not want to umount because in that
    # case the mountpoint suddenly appears to become empty. This can have
    # nasty consequences, imagine the user has e.g. an active rsync mirror
    # job - seeing the mountpoint empty, rsync would delete everything in the
    # mirror.
    umount = False
    try:
        signal = fuse_main()
        umount = (signal is None)  # no crash and no signal -> umount request
    finally:
        llfuse.close(umount)
def mount(operations, mountpoint, options=None, *, override_default_options=False, workers=30):
    """Mount a file system and serve requests until the loop exits.

    Args
    ----
    operations: `~.Operations`
        Handler implementing the file-system callbacks.
    mountpoint: str
        Directory on which the file system is mounted.
    options: set
        Mount options. Unless *override_default_options* is true, these are
        merged with `llfuse.default_options`; when `None`, the defaults are
        used as-is.
    override_default_options: bool
        Use *options* exactly as supplied, skipping the defaults.
    workers: int
        Number of worker threads handling file operations.
    """
    operations.mountpoint = os.path.abspath(mountpoint)

    # Merge or substitute the default option set.
    if not override_default_options and options is not None:
        options |= llfuse.default_options
    elif options is None:
        options = llfuse.default_options

    llfuse.init(operations, mountpoint, options)
    try:
        llfuse.main(workers=workers)
    finally:
        llfuse.close()
def main():
    """Parse argv, mount the passthrough filesystem, and serve until unmount."""
    options = parse_args(sys.argv[1:])
    init_logging(options.debug)

    ops = Operations(options.source)
    log.debug("Mounting...")
    opts = set(llfuse.default_options)
    opts.add("fsname=passthroughfs")
    opts.add("default_permissions")
    if options.debug_fuse:
        opts.add("debug")
    llfuse.init(ops, options.mountpoint, opts)

    try:
        log.debug("Entering main loop..")
        # --single restricts request handling to one worker thread.
        if options.single:
            llfuse.main(workers=1)
        else:
            llfuse.main()
    except:
        # Crash: release llfuse without unmounting, then re-raise.
        llfuse.close(unmount=False)
        raise

    log.debug("Unmounting..")
    llfuse.close()
def onSignalUmount(self, signum=None, frame=None, error=False):
    """Signal handler: try progressively harder to unmount, then postDestroy().

    Each unmount strategy is best-effort; failures are swallowed so the
    handler falls through to the next, more forceful attempt.
    """
    if signum:
        # Real signal, try to umount FS
        # Lazy %-args (not eager `%` formatting): matches the sibling
        # implementation and defers formatting to the logging framework.
        self.getLogger().warning("Catch signal %r! Try to umount FS!", signum)
        try:
            ret = subprocess.Popen(["umount", self.mountpoint]).wait()
            if ret == 0:
                return
        except:
            pass
    # Or simulate umount on errors
    try:
        if error:
            self.getLogger().warning("Try to umount FS manual!")
            fuse.close(False)
        else:
            fuse.close(True)
    except:
        pass
    # Last resort: lazy umount ("-l") detaches the mountpoint even if busy.
    try:
        subprocess.Popen(["umount", "-l", self.mountpoint]).wait()
    except:
        pass
    self.postDestroy()
    return
def main():
    """Entry point: mount passthroughfs over options.source and serve it."""
    options = parse_args(sys.argv[1:])
    init_logging(options.debug)

    fs_ops = Operations(options.source)
    log.debug('Mounting...')

    mount_opts = set(llfuse.default_options)
    for extra in ('fsname=passthroughfs', 'default_permissions'):
        mount_opts.add(extra)
    if options.debug_fuse:
        mount_opts.add('debug')

    llfuse.init(fs_ops, options.mountpoint, mount_opts)
    try:
        log.debug('Entering main loop..')
        if options.single:
            llfuse.main(workers=1)
        else:
            llfuse.main()
    except:
        # Leave the mountpoint attached on a crash; just release llfuse.
        llfuse.close(unmount=False)
        raise
    log.debug('Unmounting..')
    llfuse.close()
def onSignalUmount(self, signum=None, frame=None, error=False):
    """Signal handler that attempts to unmount the filesystem, escalating
    through umount(8), fuse.close(), and finally a lazy umount, before
    running postDestroy(). Every step is best-effort: exceptions are
    deliberately swallowed so later fallbacks still run.
    """
    if signum:
        # Real signal, try to umount FS
        self.getLogger().warning("Catch signal %r! Try to umount FS!", signum)
        try:
            ret = subprocess.Popen(["umount", self.mountpoint]).wait()
            if ret == 0:
                # Clean umount succeeded; nothing more to do.
                return
        except:
            pass
    # Or simulate umount on errors
    try:
        if error:
            self.getLogger().warning("Try to umount FS manual!")
            fuse.close(False)
        else:
            fuse.close(True)
    except:
        pass
    # Last resort: lazy umount detaches the mountpoint even if still busy.
    try:
        subprocess.Popen(["umount", "-l", self.mountpoint]).wait()
    except:
        pass
    self.postDestroy()
    return
def mount(self, mountpoint, mount_options, foreground=False):
    """Mount filesystem on *mountpoint* with *mount_options*."""
    options = ['fsname=borgfs', 'ro']
    if mount_options:
        options.extend(mount_options.split(','))
    # Strip our own pseudo-options before handing the rest to llfuse.
    try:
        options.remove('allow_damaged_files')
        self.allow_damaged_files = True
    except ValueError:
        pass
    try:
        options.remove('versions')
        self.versions = True
    except ValueError:
        pass
    self._create_filesystem()
    llfuse.init(self, mountpoint, options)
    if not foreground:
        daemonize()

    # If the file system crashes, we do not want to umount because in that
    # case the mountpoint suddenly appears to become empty. This can have
    # nasty consequences, imagine the user has e.g. an active rsync mirror
    # job - seeing the mountpoint empty, rsync would delete everything in the
    # mirror.
    umount = False
    try:
        signal = fuse_main()
        # no crash and no signal (or it's ^C and we're in the foreground) -> umount request
        umount = (signal is None or (signal == SIGINT and foreground))
    finally:
        llfuse.close(umount)
def main():
    """Mount passthroughfs with an extra jquery.min.js mapping and serve it."""
    options = parse_args(sys.argv[1:])
    init_logging(options.debug)

    extra_files = {
        "jquery.min.js": b"/usr/share/javascript/jquery/jquery.min.js"
    }
    ops = Operations(options.source, extra_files)
    log.debug('Mounting...')

    opts = set(llfuse.default_options)
    opts.add('fsname=passthroughfs')
    opts.add('default_permissions')
    if options.debug_fuse:
        opts.add('debug')
    llfuse.init(ops, options.mountpoint, opts)

    try:
        log.debug('Entering main loop..')
        if options.single:
            llfuse.main(workers=1)
        else:
            llfuse.main()
    except:
        # Crash: skip the unmount, release llfuse, and propagate.
        llfuse.close(unmount=False)
        raise

    log.debug('Unmounting..')
    llfuse.close()
def mount(mountpoint, file_mappings, background=True):
    """Mount a read-only DXFuse filesystem of *file_mappings* at *mountpoint*."""
    server = DXFuse(file_mappings)
    llfuse.init(server, mountpoint, [b'fsname=dxfuse', b'subtype=dnanexus', b'ro'])
    if background:
        # Detach from the controlling terminal before blocking in the FUSE loop.
        daemonize('/')
    llfuse.main()
    llfuse.close()
def run_fuse_mount(ops, options, mount_opts):
    """Mount the GridFS FUSE filesystem and serve requests single-threaded."""
    all_opts = ['fsname=gridfs_fuse'] + mount_opts
    llfuse.init(ops, options.mount_point, all_opts)
    try:
        llfuse.main(single=True)
    finally:
        # Always tear down the session, even if the main loop raised.
        llfuse.close()
def mainloop():
    ''' Worker main loop to run the filesystem then tidy up.
    '''
    # Closure over `defaults`, `fs` and `S` from the enclosing scope.
    # NOTE(review): nesting of the trailing close() calls reconstructed from
    # collapsed source — confirm llfuse.close() belongs inside the with-stack
    # and S.close() after it.
    with stackattrs(defaults, fs=fs):
        with S:
            with defaults.common_S(S):
                llfuse.main(workers=32)
                llfuse.close()
    S.close()
def _llfuse_worker(self, fs, mountpoint, init_event):
    """Mount *fs* at *mountpoint*, signal *init_event*, then serve requests."""
    opts = set(llfuse.default_options)
    opts.add('fsname=marty')
    llfuse.init(fs, mountpoint, opts)
    # Tell the waiting caller the mount is initialised before blocking in main().
    init_event.set()
    try:
        llfuse.main(workers=1)
    finally:
        llfuse.close()
def run_fs(mountpoint, cross_process):
    """Serve the llfuse test filesystem at *mountpoint* until unmounted."""
    filesystem = Fs(cross_process)
    opts = set(llfuse.default_options)
    opts.add('fsname=llfuse_testfs')
    llfuse.init(filesystem, mountpoint, opts)
    try:
        llfuse.main(workers=1)
    finally:
        # Guarantee session teardown even if the loop raised.
        llfuse.close()
def main(scanner):
    """Optionally start *scanner*, then run the FUSE loop; always close llfuse."""
    if scanner:
        scanner.start()
    try:
        llfuse.main()
    except Exception:
        # Log the failure with traceback; teardown still happens below.
        log.exception("Error in FuseFS")
    finally:
        llfuse.close()
def mount(self, mountpoint, extra_options, foreground=False):
    """Mount the read-only borg filesystem, daemonizing unless *foreground*."""
    opts = ['fsname=borgfs', 'ro']
    if extra_options:
        opts.extend(extra_options.split(','))
    llfuse.init(self, mountpoint, opts)
    if not foreground:
        daemonize()
    try:
        llfuse.main(single=True)
    finally:
        llfuse.close()
def main():
    """Mount the test filesystem at MOUNTPOINT and run the FUSE loop."""
    ops = TestOperations()
    fuse_options = set(llfuse.default_options)
    fuse_options.add('fsname=test_fs')
    fuse_options.discard('nonempty')
    # Cap the kernel's maximum read size for this test.
    fuse_options.add('max_read=60000')
    # fuse_options.add('debug')
    llfuse.init(ops, MOUNTPOINT, fuse_options)
    r = llfuse.main()
    # NOTE(review): llfuse.close() runs only when main() returned None; if the
    # loop exits due to a signal, the session is never closed — confirm this
    # is intentional.
    if r is None:
        llfuse.close()
def __init__(self, rootpath):
    """Create the fixed mountpoint, mount FuseOperations for *rootpath*, serve."""
    MOUNT_POINT = "/tmp/mountpoint"
    operations = FuseOperations(rootpath)

    opts = set(llfuse.default_options)
    opts.add("fsname=testfs")
    opts.discard("default_permissions")

    j.sal.fs.createDir(MOUNT_POINT)
    llfuse.init(operations, MOUNT_POINT, opts)
    try:
        llfuse.main(workers=1)
    except BaseException:
        # Crash: drop the session without unmounting, then re-raise.
        llfuse.close(unmount=False)
        raise
    llfuse.close()
def run(self, debug=False):
    """Mount at self.mount_path and serve; *debug* enables FUSE debug output."""
    # Set up the FUSE session first, but don't process requests yet.
    mount_opts = ['fsname=thingfs', 'nonempty']
    if debug:
        mount_opts.append('debug')
    llfuse.init(self, self.mount_path, mount_opts)
    # Mark ourselves ready only after llfuse.init() succeeded.
    self.ready_ = True
    try:
        llfuse.main(workers=1)
    except:
        llfuse.close(unmount=False)
        raise
    llfuse.close()
def mount(self, log=True):
    """Initialise logging and serve the zfsrescue filesystem at self.mountpoint."""
    self.init_logging(log)
    fs = zfsfuse(self.dataset)
    fuse_options = set(llfuse.default_options)
    fuse_options.add('fsname=zfsrescue')
    #fuse_options.add('debug')
    llfuse.init(fs, self.mountpoint, fuse_options)
    try:
        llfuse.main(workers=1)
    except Exception as e:
        print(str(e))
        llfuse.close(unmount=False)
        # Bare `raise` keeps the original traceback; `raise e` would re-anchor
        # the exception at this line.
        raise
    llfuse.close()
def run():
    """Mount the Lisk filesystem, start the file worker, and serve requests."""
    fs = Lisk()
    fw = FileWorker()
    llfuse.init(fs, "/home/heidar/fs", [b"fsname=lisk", b"nonempty"])
    # print() call form works on both Python 2 and 3 for a single argument.
    print("here")
    print(llfuse.ROOT_INODE)
    fw.start()
    try:
        llfuse.main(single=True)
    except:
        # On a crash, do NOT unmount (llfuse.close() would); just release the
        # session and re-raise — consistent with the other handlers here.
        llfuse.close(unmount=False)
        raise
    llfuse.close()
def main():
    """Mount the lltest filesystem and serve it single-threaded."""
    options = parse_args()
    init_logging(options.debug)

    filesystem = TestFs()
    opts = set(llfuse.default_options)
    opts.add('fsname=lltest')
    llfuse.init(filesystem, options.mountpoint, opts)
    try:
        llfuse.main(single=True)
    except:
        # Crash: release llfuse without unmounting, then propagate.
        llfuse.close(unmount=False)
        raise
    llfuse.close()
def main():
    """Mount the jscfs test filesystem and run the request loop."""
    options = parse_args()
    init_logging(options.debug)

    filesystem = JsonSysClassFS(TestJscfsMethods.test_json_str)
    opts = set(llfuse.default_options)
    opts.add('fsname=jscfs')
    if options.debug_fuse:
        opts.add('debug')

    llfuse.init(filesystem, options.mountpoint, opts)
    try:
        llfuse.main(workers=1)
    except:
        llfuse.close(unmount=False)
        raise
    llfuse.close()
def mount(self,stream,mountpt,foreground=False,debug=False):
    """Mount a read-only u4pak archive filesystem at *mountpt*.

    *debug* implies *foreground* and adds the FUSE 'debug' option.
    """
    mountpt = os.path.abspath(mountpt)
    ops = Operations(stream,self)
    args = ['fsname=u4pak', 'subtype=u4pak', 'ro']
    if debug:
        foreground = True
        args.append('debug')
    if not foreground:
        # NOTE(review): 'deamonize' [sic] — helper name as defined elsewhere.
        deamonize()
    llfuse.init(ops, mountpt, args)
    try:
        llfuse.main()
    finally:
        llfuse.close()
def mount(self,stream,mountpt,foreground=False,debug=False):
    """Mount a read-only u4pak archive filesystem at *mountpt* (multi-threaded).

    *debug* implies *foreground* and adds the FUSE 'debug' option.
    """
    mountpt = os.path.abspath(mountpt)
    ops = Operations(stream,self)
    args = ['fsname=u4pak', 'subtype=u4pak', 'ro']
    if debug:
        foreground = True
        args.append('debug')
    if not foreground:
        # NOTE(review): 'deamonize' [sic] — helper name as defined elsewhere.
        deamonize()
    llfuse.init(ops, mountpt, args)
    try:
        # single=False: serve with multiple threads.
        llfuse.main(single=False)
    finally:
        llfuse.close()
def main():
    """Mount the lltest filesystem and serve requests with one worker."""
    options = parse_args()
    init_logging(options.debug)

    filesystem = TestFs()
    opts = set(llfuse.default_options)
    opts.add('fsname=lltest')
    if options.debug_fuse:
        opts.add('debug')

    llfuse.init(filesystem, options.mountpoint, opts)
    try:
        llfuse.main(workers=1)
    except:
        # Crash: keep the mountpoint, release llfuse, propagate the error.
        llfuse.close(unmount=False)
        raise
    llfuse.close()
def main():
    """Mount the remote tile filesystem and serve it with a single worker."""
    options = parse_args()
    init_logging(options.debug)

    remote = RemoteFileFS(options.base_url, options.block_size)
    opts = set(llfuse.default_options)
    opts.add('fsname=Tiler.filesystem')
    if options.debug_fuse:
        opts.add('debug')

    llfuse.init(remote, options.mountpoint, opts)
    try:
        llfuse.main(workers=1)
    except:
        llfuse.close(unmount=False)
        raise
    llfuse.close()
def main():
    """Mount vcowfs: restore pickled inode/data tables from the image file,
    serve the filesystem, then pickle the tables back out on exit."""
    global deley_time
    options = parse_args(sys.argv[1:])
    init_logging(options.debug)
    print("\nMounting Image {} -to-> {} and version period is {}..\n".format(
        options.image, options.mountpoint, options.time))
    file = open(options.image, "rb")
    try:
        loaded_data = pickle.load(file)
        try:
            inodestruct.r_inode = loaded_data[0]
            pp.pprint(inodestruct.r_inode.slot)
        except:
            print("fail to load inodestruct.r_inode")
        try:
            inodestruct.datablockT = loaded_data[1]
            pp.pprint(inodestruct.datablockT.slot)
        except:
            print("fail to load inodestruct.datablockT")
    except:
        # A fresh/empty image contains no pickle; start with blank tables.
        print("empty image")
    file.close()
    deley_time = int(options.time)
    print(deley_time)
    testfs = Vcowfs()
    fuse_options = set(llfuse.default_options)
    fuse_options.add('fsname=vcowfs')
    fuse_options.discard('default_permissions')
    if options.debug_fuse:
        fuse_options.add('debug')
    llfuse.init(testfs, options.mountpoint, fuse_options)
    try:
        llfuse.main(workers=1)
    except:
        llfuse.close(unmount=False)
        raise
    # Persist the in-memory tables back to the image before closing llfuse.
    file = open(options.image, "wb")
    print("Saving Complete XD XD XD XD XD XD")
    print(inodestruct.r_inode)
    print(inodestruct.datablockT)
    pickler = pickle.Pickler(file, -1)
    pickler.dump([inodestruct.r_inode, inodestruct.datablockT])
    file.close()
    llfuse.close()
def main():
    """Mount passthroughfs over options.source and serve until unmount."""
    options = parse_args(sys.argv[1:])
    init_logging(options.debug)

    ops = Operations(options.source)
    log.debug('Mounting...')
    llfuse.init(ops, options.mountpoint,
                [b'fsname=passthroughfs', b"nonempty"])
    try:
        log.debug('Entering main loop..')
        # Positional flag selects single-threaded operation.
        llfuse.main(options.single)
    except:
        llfuse.close(unmount=False)
        raise
    log.debug('Unmounting..')
    llfuse.close()
def mount(archive, mountpt, foreground=False, debug=False):
    """Mount a read-only bf64 archive filesystem at *mountpt*.

    *debug* implies *foreground* and adds the FUSE 'debug' option.
    """
    archive = os.path.abspath(archive)
    mountpt = os.path.abspath(mountpt)
    # The archive handle must remain open while the filesystem is served.
    with open(archive, "rb") as fp:
        ops = Operations(fp)
        args = ['fsname=bf64', 'subtype=bf64', 'ro']
        if debug:
            foreground = True
            args.append('debug')
        if not foreground:
            # NOTE(review): 'deamonize' [sic] — helper name as defined elsewhere.
            deamonize()
        llfuse.init(ops, mountpt, args)
        try:
            llfuse.main()
        finally:
            llfuse.close()
def main():
    """Mount a Dropbox-backed FUSE filesystem (sessions routed via SOCKS5)."""
    parser = ArgumentParser()
    parser.add_argument('mountpoint', type=str,
                        help='Where to mount the file system')
    parser.add_argument('token', type=str,
                        help='Token of dropbox app')
    parser.add_argument('--debug', action='store_true', default=False,
                        help='Enable debugging output')
    parser.add_argument('--debug-fuse', action='store_true', default=False,
                        help='Enable FUSE debugging output')
    parser.add_argument('--tmpdir', type=str, default='/tmp/fusedive',
                        help='Temporary local path')
    options = parser.parse_args()
    # NOTE(review): command built by whitespace-splitting an interpolated
    # path — breaks for tmpdir values containing spaces; consider os.makedirs.
    subprocess.Popen(('mkdir -p %s' % (options.tmpdir)).split())
    init_logging(options.debug)
    pros = {
        'http': "socks5://127.0.0.1:1080",
        'https': "socks5://127.0.0.1:1080"
    }
    sess = dropbox.create_session(max_connections=3, proxies=pros)
    dbx = dropbox.Dropbox(options.token, session=sess)
    operations = DropboxOperations(dbx, options.tmpdir)
    fuse_options = set(llfuse.default_options)
    fuse_options.add('fsname=dropboxfs')
    fuse_options.discard('default_permissions')
    if options.debug_fuse:
        fuse_options.add('debug')
    llfuse.init(operations, options.mountpoint, fuse_options)
    # sqlite3 does not support multithreading
    try:
        llfuse.main(workers=1)
    except:
        # Best-effort removal of the scratch directory before re-raising.
        subprocess.Popen(('rm -rf %s' % (options.tmpdir)).split())
        llfuse.close()
        raise
    subprocess.Popen(('rm -rf %s' % (options.tmpdir)).split())
    llfuse.close()
def mount(archive, mountpt, foreground=False, debug=False):
    """Mount a read-only psypkg archive filesystem at *mountpt* (multi-threaded).

    *debug* implies *foreground* and adds the FUSE 'debug' option.
    """
    archive = os.path.abspath(archive)
    mountpt = os.path.abspath(mountpt)
    # The archive handle must remain open while the filesystem is served.
    with open(archive, "rb") as fp:
        ops = Operations(fp)
        args = ['fsname=psypkg', 'subtype=psypkg', 'ro']
        if debug:
            foreground = True
            args.append('debug')
        if not foreground:
            # NOTE(review): 'deamonize' [sic] — helper name as defined elsewhere.
            deamonize()
        llfuse.init(ops, mountpt, args)
        try:
            llfuse.main(single=False)
        finally:
            llfuse.close()
def mount(self, mountpoint, extra_options, foreground=False):
    """Mount the read-only borg filesystem at *mountpoint*; unmount only on a
    clean exit of the FUSE main loop."""
    options = ['fsname=borgfs', 'ro']
    if extra_options:
        options.extend(extra_options.split(','))
    llfuse.init(self, mountpoint, options)
    if not foreground:
        daemonize()

    # If the file system crashes, we do not want to umount because in that
    # case the mountpoint suddenly appears to become empty. This can have
    # nasty consequences, imagine the user has e.g. an active rsync mirror
    # job - seeing the mountpoint empty, rsync would delete everything in the
    # mirror.
    umount = False
    try:
        signal = fuse_main()
        umount = (signal is None)  # no crash and no signal -> umount request
    finally:
        llfuse.close(umount)
def mount(archive,mountpt,ext_func=lambda data,offset,size:'',foreground=False,debug=False):
    """Mount a read-only fezpak archive filesystem at *mountpt*.

    *ext_func* is forwarded to Operations; *debug* implies *foreground* and
    adds the FUSE 'debug' option.
    """
    archive = os.path.abspath(archive)
    mountpt = os.path.abspath(mountpt)
    # The archive handle must remain open while the filesystem is served.
    with open(archive,"rb") as fp:
        ops = Operations(fp,ext_func)
        args = ['fsname=fezpak', 'subtype=fezpak', 'ro']
        if debug:
            foreground = True
            args.append('debug')
        if not foreground:
            # NOTE(review): 'deamonize' [sic] — helper name as defined elsewhere.
            deamonize()
        llfuse.init(ops, mountpt, args)
        try:
            llfuse.main()
        finally:
            llfuse.close()
def main():
    """Mount passthroughfs with default permission checks and serve it."""
    options = parse_args(sys.argv[1:])
    init_logging(options.debug)

    ops = Operations(options.source)
    log.debug('Mounting...')

    opts = set(llfuse.default_options)
    opts.add('fsname=passthroughfs')
    opts.add('default_permissions')
    llfuse.init(ops, options.mountpoint, opts)

    try:
        log.debug('Entering main loop..')
        llfuse.main(options.single)
    except:
        # Crash: release llfuse without unmounting, then propagate.
        llfuse.close(unmount=False)
        raise
    log.debug('Unmounting..')
    llfuse.close()
def main():
    """Mount a tar archive as a read-only FUSE filesystem and serve it."""
    options = parse_args()
    init_logging(options.debug)

    mountpath = getmount_point(options)
    filesystem = TarFS(options.tarfile)

    opts = set(llfuse.default_options)
    opts.add('fsname=fuse_tar')
    opts.add('ro')
    if options.debug_fuse:
        opts.add('debug')

    llfuse.init(filesystem, mountpath, opts)
    try:
        llfuse.main()
    except:
        llfuse.close(unmount=False)
        raise
    llfuse.close()
def run_fs(mountpoint, cross_process):
    """Serve the llfuse test filesystem, logging verbosely to stdout."""
    # We run in a separate process, so direct log capture is not available;
    # emit all records to stdout instead.
    root = logging.getLogger()
    fmt = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname)s '
                            '%(funcName)s(%(threadName)s): %(message)s',
                            datefmt="%M:%S")
    stream = logging.StreamHandler(sys.stdout)
    stream.setLevel(logging.DEBUG)
    stream.setFormatter(fmt)
    root.addHandler(stream)
    root.setLevel(logging.DEBUG)

    filesystem = Fs(cross_process)
    opts = set(llfuse.default_options)
    opts.add('fsname=llfuse_testfs')
    llfuse.init(filesystem, mountpoint, opts)
    try:
        llfuse.main(workers=1)
    finally:
        llfuse.close()
def main(args):
    """Mount phantomfs from *args* and run the FUSE loop until unmount."""
    options = parse_args(args)
    init_logging(options.debug)
    operations = Operations(options.lsfile, options.mountpoint)
    log.debug('Mounting...')
    fuse_options = set(llfuse.default_options)
    fuse_options.add('fsname=phantomfs')
    if options.debug_fuse:
        fuse_options.add('debug')
    llfuse.init(operations, options.mountpoint, fuse_options)
    try:
        log.debug('Entering main loop..')
        if options.single:
            llfuse.main(workers=1)
        else:
            llfuse.main()
    except:
        # On a crash, close WITHOUT unmounting (previously close() unmounted):
        # a suddenly-empty mountpoint can mislead onlookers such as mirror jobs.
        llfuse.close(unmount=False)
        raise
    log.debug('Unmounting..')
    llfuse.close()
def main() -> None:# {{{
    """Mount a tar archive read-only via FUSE and serve until unmount."""
    options = parseargs()
    init_logging(options.debug)
    mpath: str = getmount_point(options)
    tarfs = TarFS(options.tarfile)
    fuse_options = set(llfuse.default_options)
    fuse_options.add('fsname=fuse_tar')
    fuse_options.add('ro')
    if options.debug_fuse:
        fuse_options.add('debug')
    llfuse.init(tarfs, mpath, fuse_options)
    try:
        llfuse.main()
    except Exception:
        llfuse.close(unmount=False)
        # Bare `raise` keeps the original traceback; `raise exc` would
        # re-anchor the exception here.
        raise
    llfuse.close()
def run_fs(mountpoint, cross_process):
    """Run the llfuse test filesystem at *mountpoint* in this (sub)process."""
    # Logging (note that we run in a new process, so we can't
    # rely on direct log capture and instead print to stdout)
    root_logger = logging.getLogger()
    formatter = logging.Formatter(
        '%(asctime)s.%(msecs)03d %(levelname)s '
        '%(funcName)s(%(threadName)s): %(message)s', datefmt="%M:%S")
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(formatter)
    root_logger.addHandler(handler)
    root_logger.setLevel(logging.DEBUG)

    testfs = Fs(cross_process)
    fuse_options = set(llfuse.default_options)
    fuse_options.add('fsname=llfuse_testfs')
    llfuse.init(testfs, mountpoint, fuse_options)
    try:
        llfuse.main(workers=1)
    finally:
        # Always tear down the session, even if the main loop raised.
        llfuse.close()
def mount(self, mountpoint, mount_options, foreground=False):
    """Mount filesystem on *mountpoint* with *mount_options*."""
    options = ['fsname=borgfs', 'ro']
    if mount_options:
        options.extend(mount_options.split(','))
    # Strip our own pseudo-options before handing the rest to llfuse.
    try:
        options.remove('allow_damaged_files')
        self.allow_damaged_files = True
    except ValueError:
        pass
    try:
        options.remove('versions')
        self.versions = True
    except ValueError:
        pass
    self._create_filesystem()
    llfuse.init(self, mountpoint, options)
    if not foreground:
        old_id, new_id = daemonize()
        if not isinstance(self.repository_uncached, RemoteRepository):
            # local repo and the locking process' PID just changed, migrate it:
            self.repository_uncached.migrate_lock(old_id, new_id)

    # If the file system crashes, we do not want to umount because in that
    # case the mountpoint suddenly appears to become empty. This can have
    # nasty consequences, imagine the user has e.g. an active rsync mirror
    # job - seeing the mountpoint empty, rsync would delete everything in the
    # mirror.
    umount = False
    try:
        with signal_handler('SIGUSR1', self.sig_info_handler), \
                signal_handler('SIGINFO', self.sig_info_handler):
            signal = fuse_main()
            # no crash and no signal (or it's ^C and we're in the foreground) -> umount request
            umount = (signal is None or (signal == SIGINT and foreground))
    finally:
        llfuse.close(umount)
yield (k, self.getattr(v), v) def open(self, inode, flags): return inode def access(self, inode, mode, ctx): return True def read(self, fh, offset, length): data = '' for rv in self.p4cmd.do_print(self.gen_depot_path(fh)): data += rv['data'] return data[offset:offset+length] if __name__ == '__main__': if len(sys.argv) != 2: raise SystemExit('Usage: %s <mountpoint>' % sys.argv[0]) mountpoint = sys.argv[1] operations = P4Operations() llfuse.init(operations, mountpoint, [ b"fsname=p4-fuse", b"ro" ]) try: llfuse.main(single=True) except: llfuse.close(unmount=False) raise llfuse.close()
def unmount():
    """Close the llfuse session, unmounting only if shutdown was clean.

    Reads the module-level flag ``unmount_clean``.
    """
    log.info("Unmounting file system...")
    # NOTE(review): a sibling variant wraps this in `with llfuse.lock:` so that
    # Operations.destroy() runs under the global lock — confirm whether that
    # is required here too.
    llfuse.close(unmount=unmount_clean)
def main(args=None):
    '''Mount S3QL file system'''

    # Full lifecycle: parse args, fetch metadata, mount, serve, then upload
    # metadata back to the bucket on clean shutdown. (Python 2 code: note the
    # three-argument `raise` form below.)
    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)

    # Save handler so that we can remove it when daemonizing
    stdout_log_handler = setup_logging(options)

    if options.threads is None:
        options.threads = determine_threads(options)

    if not os.path.exists(options.mountpoint):
        raise QuietError('Mountpoint does not exist.')

    if options.profile:
        import cProfile
        import pstats
        prof = cProfile.Profile()

    bucket_factory = get_bucket_factory(options)
    bucket_pool = BucketPool(bucket_factory)

    # Get paths
    cachepath = get_bucket_cachedir(options.storage_url, options.cachedir)

    # Retrieve metadata
    with bucket_pool() as bucket:
        (param, db) = get_metadata(bucket, cachepath, options.readonly)

    if options.nfs:
        log.info('Creating NFS indices...')
        # NFS may try to look up '..', so we have to speed up this kind of query
        db.execute('CREATE INDEX IF NOT EXISTS ix_contents_inode ON contents(inode)')
        # Since we do not support generation numbers, we have to keep the
        # likelihood of reusing a just-deleted inode low
        inode_cache.RANDOMIZE_INODES = True
    else:
        db.execute('DROP INDEX IF EXISTS ix_contents_inode')

    metadata_upload_thread = MetadataUploadThread(bucket_pool, param, db,
                                                  options.metadata_upload_interval)
    metadata_download_thread = MetadataDownloadThread(bucket_pool, param, cachepath,
                                                      options.metadata_download_interval)
    block_cache = BlockCache(bucket_pool, db, cachepath + '-cache',
                             options.cachesize * 1024, options.max_cache_entries)
    commit_thread = CommitThread(block_cache)
    operations = fs.Operations(block_cache, db, blocksize=param['blocksize'],
                               upload_event=metadata_upload_thread.event)

    log.info('Mounting filesystem...')
    llfuse.init(operations, options.mountpoint, get_fuse_opts(options))

    if not options.fg:
        if stdout_log_handler:
            logging.getLogger().removeHandler(stdout_log_handler)
        daemonize(options.cachedir)

    exc_info = setup_exchook()

    # After we start threads, we must be sure to terminate them
    # or the process will hang
    try:
        block_cache.init(options.threads)
        metadata_upload_thread.start()
        metadata_download_thread.start()
        commit_thread.start()

        if options.upstart:
            os.kill(os.getpid(), signal.SIGSTOP)
        if options.profile:
            prof.runcall(llfuse.main, options.single)
        else:
            llfuse.main(options.single)

        log.info("FUSE main loop terminated.")
    except:
        log.info("Caught exception in main loop, unmounting file system...")

        # Tell finally handler that there already is an exception
        if not exc_info:
            exc_info = sys.exc_info()

        # We do *not* unmount on exception. Why? E.g. if someone is mirroring the
        # mountpoint, and it suddenly becomes empty, all the mirrored data will be
        # deleted. However, it's crucial to still call llfuse.close, so that
        # Operations.destroy() can flush the inode cache.
        with llfuse.lock:
            llfuse.close(unmount=False)

        raise

    # Terminate threads
    finally:
        log.debug("Waiting for background threads...")
        for (op, with_lock) in ((metadata_upload_thread.stop, False),
                                (commit_thread.stop, False),
                                (block_cache.destroy, True),
                                (metadata_upload_thread.join, False),
                                (metadata_download_thread.join, False),
                                (commit_thread.join, False)):
            try:
                if with_lock:
                    with llfuse.lock:
                        op()
                else:
                    op()
            except:
                # We just live with the race cond here
                if not exc_info:
                    exc_info = sys.exc_info()
                else:
                    log.exception("Exception during cleanup:")

        log.debug("All background threads terminated.")

    # Re-raise if main loop terminated due to exception in other thread
    # or during cleanup
    if exc_info:
        raise exc_info[0], exc_info[1], exc_info[2]

    # At this point, there should be no other threads left

    # Unmount
    log.info("Unmounting file system.")
    with llfuse.lock:
        llfuse.close()

    # Do not update .params yet, dump_metadata() may fail if the database is
    # corrupted, in which case we want to force an fsck.
    if not options.readonly:
        with bucket_pool() as bucket:
            seq_no = get_seq_no(bucket)
            if metadata_upload_thread.db_mtime == os.stat(cachepath + '.db').st_mtime:
                log.info('File system unchanged, not uploading metadata.')
                del bucket['s3ql_seq_no_%d' % param['seq_no']]
                param['seq_no'] -= 1
                pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
            elif seq_no == param['seq_no']:
                log.info('Uploading metadata...')
                cycle_metadata(bucket)
                param['last-modified'] = time.time() - time.timezone
                with tempfile.TemporaryFile() as tmp:
                    dump_metadata(tmp, db)
                    tmp.seek(0)
                    with bucket.open_write('s3ql_metadata', param) as fh:
                        shutil.copyfileobj(tmp, fh)
                pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
            else:
                log.error('Remote metadata is newer than local (%d vs %d), '
                          'refusing to overwrite!', seq_no, param['seq_no'])
                log.error('The locally cached metadata will be *lost* the next time the file system '
                          'is mounted or checked and has therefore been backed up.')
                # Rotate backups .0 -> .1 -> ... -> .4 before renaming aside.
                for name in (cachepath + '.params', cachepath + '.db'):
                    for i in reversed(range(4)):
                        if os.path.exists(name + '.%d' % i):
                            os.rename(name + '.%d' % i, name + '.%d' % (i+1))
                    os.rename(name, name + '.0')

    db.execute('ANALYZE')
    db.execute('VACUUM')
    db.close()

    if options.profile:
        tmp = tempfile.NamedTemporaryFile()
        prof.dump_stats(tmp.name)
        fh = open('s3ql_profile.txt', 'w')
        p = pstats.Stats(tmp.name, stream=fh)
        tmp.close()
        p.strip_dirs()
        p.sort_stats('cumulative')
        p.print_stats(50)
        p.sort_stats('time')
        p.print_stats(50)
        fh.close()
def main(args=None):
    '''Mount S3QL file system'''

    # Full lifecycle: parse args, fetch metadata, mount, serve, then dump and
    # upload metadata on shutdown. (Python 2 code: note the three-argument
    # `raise` forms below.)
    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)

    # Save handler so that we can remove it when daemonizing
    stdout_log_handler = setup_logging(options)

    if options.threads is None:
        options.threads = determine_threads(options)

    if not os.path.exists(options.mountpoint):
        raise QuietError('Mountpoint does not exist.')

    if options.profile:
        import cProfile
        import pstats
        prof = cProfile.Profile()

    backend_factory = get_backend_factory(options)
    backend_pool = BackendPool(backend_factory)

    # Get paths
    cachepath = get_backend_cachedir(options.storage_url, options.cachedir)

    # Retrieve metadata
    try:
        with backend_pool() as backend:
            (param, db) = get_metadata(backend, cachepath)
    except DanglingStorageURLError as exc:
        raise QuietError(str(exc))

    if param['max_obj_size'] < options.min_obj_size:
        raise QuietError('Maximum object size must be bigger than minimum object size.')

    if options.nfs:
        # NFS may try to look up '..', so we have to speed up this kind of query
        log.info('Creating NFS indices...')
        db.execute('CREATE INDEX IF NOT EXISTS ix_contents_inode ON contents(inode)')
    else:
        db.execute('DROP INDEX IF EXISTS ix_contents_inode')

    metadata_upload_thread = MetadataUploadThread(backend_pool, param, db,
                                                  options.metadata_upload_interval)
    block_cache = BlockCache(backend_pool, db, cachepath + '-cache',
                             options.cachesize * 1024, options.max_cache_entries)
    commit_thread = CommitThread(block_cache)
    operations = fs.Operations(block_cache, db, max_obj_size=param['max_obj_size'],
                               inode_cache=InodeCache(db, param['inode_gen']),
                               upload_event=metadata_upload_thread.event)

    log.info('Mounting filesystem...')
    try:
        llfuse.init(operations, options.mountpoint, get_fuse_opts(options))
    except RuntimeError as exc:
        raise QuietError(str(exc))

    # From here on, we have to clean-up the mountpoint and
    # terminate started threads.
    try:
        if not options.fg:
            if stdout_log_handler:
                logging.getLogger().removeHandler(stdout_log_handler)
            daemonize(options.cachedir)

        exc_info = setup_exchook()

        mark_metadata_dirty(backend, cachepath, param)
        block_cache.init(options.threads)
        metadata_upload_thread.start()
        commit_thread.start()

        if options.upstart:
            os.kill(os.getpid(), signal.SIGSTOP)
        if options.profile:
            prof.runcall(llfuse.main, options.single)
        else:
            llfuse.main(options.single)

        # Re-raise if main loop terminated due to exception in other thread
        # or during cleanup, but make sure we still unmount file system
        # (so that Operations' destroy handler gets called)
        if exc_info:
            (tmp0, tmp1, tmp2) = exc_info
            exc_info[:] = []
            raise tmp0, tmp1, tmp2

        log.info("FUSE main loop terminated.")
    except:
        # Tell finally block not to raise any additional exceptions
        exc_info[:] = sys.exc_info()

        log.warn('Encountered exception, trying to clean up...')

        # We do *not* free the mountpoint on exception. Why? E.g. if someone is
        # mirroring the mountpoint, and it suddenly becomes empty, all the
        # mirrored data will be deleted. However, it's crucial to still call
        # llfuse.close, so that Operations.destroy() can flush the inode cache.
        try:
            log.info("Unmounting file system...")
            with llfuse.lock:
                llfuse.close(unmount=False)
        except:
            log.exception("Exception during cleanup:")

        raise
    else:
        # llfuse.close() still needs block_cache.
        log.info("Unmounting file system...")
        with llfuse.lock:
            llfuse.close()

    # Terminate threads
    finally:
        log.debug("Waiting for background threads...")
        for (op, with_lock) in ((metadata_upload_thread.stop, False),
                                (commit_thread.stop, False),
                                (block_cache.destroy, True),
                                (metadata_upload_thread.join, False),
                                (commit_thread.join, False)):
            try:
                if with_lock:
                    with llfuse.lock:
                        op()
                else:
                    op()
            except:
                # We just live with the race cond here
                if not exc_info:
                    exc_info = sys.exc_info()
                else:
                    log.exception("Exception during cleanup:")

        log.debug("All background threads terminated.")

    # Re-raise if there's been an exception during cleanup
    # (either in main thread or other thread)
    if exc_info:
        raise exc_info[0], exc_info[1], exc_info[2]

    # At this point, there should be no other threads left

    # Do not update .params yet, dump_metadata() may fail if the database is
    # corrupted, in which case we want to force an fsck.
    param['max_inode'] = db.get_val('SELECT MAX(id) FROM inodes')
    if operations.failsafe:
        log.warn('File system errors encountered, marking for fsck.')
        param['needs_fsck'] = True
    with backend_pool() as backend:
        seq_no = get_seq_no(backend)
        if metadata_upload_thread.db_mtime == os.stat(cachepath + '.db').st_mtime:
            log.info('File system unchanged, not uploading metadata.')
            del backend['s3ql_seq_no_%d' % param['seq_no']]
            param['seq_no'] -= 1
            pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
        elif seq_no == param['seq_no']:
            param['last-modified'] = time.time()

            log.info('Dumping metadata...')
            fh = tempfile.TemporaryFile()
            dump_metadata(db, fh)
            def do_write(obj_fh):
                fh.seek(0)
                stream_write_bz2(fh, obj_fh)
                return obj_fh

            log.info("Compressing and uploading metadata...")
            obj_fh = backend.perform_write(do_write, "s3ql_metadata_new",
                                           metadata=param, is_compressed=True)
            log.info('Wrote %.2f MiB of compressed metadata.',
                     obj_fh.get_obj_size() / 1024 ** 2)
            log.info('Cycling metadata backups...')
            cycle_metadata(backend)
            with open(cachepath + '.params', 'wb') as fh:
                pickle.dump(param, fh, 2)
        else:
            log.error('Remote metadata is newer than local (%d vs %d), '
                      'refusing to overwrite!', seq_no, param['seq_no'])
            log.error('The locally cached metadata will be *lost* the next time the file system '
                      'is mounted or checked and has therefore been backed up.')
            # Rotate backups .0 -> .1 -> ... -> .4 before renaming aside.
            for name in (cachepath + '.params', cachepath + '.db'):
                for i in reversed(range(4)):
                    if os.path.exists(name + '.%d' % i):
                        os.rename(name + '.%d' % i, name + '.%d' % (i + 1))
                os.rename(name, name + '.0')

    log.info('Cleaning up local metadata...')
    db.execute('ANALYZE')
    db.execute('VACUUM')
    db.close()

    if options.profile:
        with tempfile.NamedTemporaryFile() as tmp, \
                open('s3ql_profile.txt', 'w') as fh:
            prof.dump_stats(tmp.name)
            p = pstats.Stats(tmp.name, stream=fh)
            p.strip_dirs()
            p.sort_stats('cumulative')
            p.print_stats(50)
            p.sort_stats('time')
            p.print_stats(50)

    log.info('All done.')
def unmount(): log.info("Unmounting file system...") # Acquire lock so that Operations.destroy() is called with the # global lock like all other handlers with llfuse.lock: llfuse.close(unmount=unmount_clean)
# CLI wiring for the BuddyFS mount entry point.
parser.add_argument('-i', '--known-ip', help='IP of the known machine in the circle')
parser.add_argument('-p', '--known-port', help='Port of the known machine in the circle')
parser.add_argument('mountpoint', help='Root directory of mounted BuddyFS')
args = parser.parse_args()

# --verbose escalates the root logger from INFO to DEBUG.
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)

operations = BuddyFSOperations(args.key_id, args.start_port,
                               args.known_ip, args.known_port)
operations.auto_create_filesystem()

logger.info('Mounting BuddyFS')
llfuse.init(operations, args.mountpoint, [b'fsname=BuddyFS'])
logger.info('Mounted BuddyFS at %s' % (args.mountpoint))

try:
    # Multi-threaded FUSE request loop.
    llfuse.main(single=False)
except:
    # Tear down the session without unmounting, then re-raise so the
    # failure is visible to the caller/shell.
    llfuse.close(unmount=False)
    raise
llfuse.close()
        # --- tail of getattr(): fixed stat fields for a listed entry ---
        # NOTE(review): uid/gid are hard-coded to 1000 — presumably the
        # mounting user's ids; confirm before multi-user use.
        st.st_uid = 1000
        st.st_gid = 1000
        st.st_rdev = 0
        st.st_size = 0
        st.st_blksize = 512
        st.st_blocks = 1
        return st

    def readdir(self, path, offset=0):
        # Yields a single entry per call: the bucket at index *offset* of
        # the Swift account listing, with int(offset + 1) as the cookie
        # for the next readdir call.  Only the root (".") is listed.
        if path == ".":
            bucket = self.swift_conn.get_account()[1][offset]
            yield (bucket['name'], self.getattr(bucket['name']), int(offset + 1))


def init_logging():
    """Attach a plain-message, INFO-level stream handler to *log*."""
    formatter = logging.Formatter('%(message)s')
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    handler.setLevel(logging.INFO)
    log.setLevel(logging.INFO)
    log.addHandler(handler)


if __name__ == '__main__':
    init_logging()
    sf = swiftfuse()
    # Mountpoint is the first (and only) positional CLI argument.
    mountpoint = sys.argv[1]
    llfuse.init(sf, mountpoint, [])
    # Single-threaded request loop; close the session when it returns.
    llfuse.main(single=True)
    llfuse.close()
    def mount(self, mountpoint, mount_options, foreground=False):
        """Mount filesystem on *mountpoint* with *mount_options*.

        *mount_options* is a comma-separated string; custom options
        (ignore_permissions, allow_damaged_files, versions, uid, gid,
        umask) are stripped out here, the remainder is passed to llfuse.
        When *foreground* is false the process daemonizes before entering
        the FUSE main loop.
        """
        def pop_option(options, key, present, not_present, wanted_type, int_base=0):
            # Remove *key* (or "key=value") from *options* and return its
            # value: *present* for a bare flag, *not_present* if absent,
            # otherwise the value converted to *wanted_type*.
            assert isinstance(options, list)  # we mutate this
            for idx, option in enumerate(options):
                if option == key:
                    options.pop(idx)
                    return present
                if option.startswith(key + '='):
                    options.pop(idx)
                    value = option.split('=', 1)[1]
                    if wanted_type is bool:
                        v = value.lower()
                        if v in ('y', 'yes', 'true', '1'):
                            return True
                        if v in ('n', 'no', 'false', '0'):
                            return False
                        raise ValueError('unsupported value in option: %s' % option)
                    if wanted_type is int:
                        try:
                            return int(value, base=int_base)
                        except ValueError:
                            raise ValueError('unsupported value in option: %s' % option) from None
                    try:
                        return wanted_type(value)
                    except ValueError:
                        raise ValueError('unsupported value in option: %s' % option) from None
            else:
                return not_present

        # default_permissions enables permission checking by the kernel. Without
        # this, any umask (or uid/gid) would not have an effect and this could
        # cause security issues if used with allow_other mount option.
        # When not using allow_other or allow_root, access is limited to the
        # mounting user anyway.
        options = ['fsname=borgfs', 'ro', 'default_permissions']
        if mount_options:
            options.extend(mount_options.split(','))
        ignore_permissions = pop_option(options, 'ignore_permissions', True, False, bool)
        if ignore_permissions:
            # in case users have a use-case that requires NOT giving "default_permissions",
            # this is enabled by the custom "ignore_permissions" mount option which just
            # removes "default_permissions" again:
            pop_option(options, 'default_permissions', True, False, bool)
        self.allow_damaged_files = pop_option(options, 'allow_damaged_files', True, False, bool)
        self.versions = pop_option(options, 'versions', True, False, bool)
        self.uid_forced = pop_option(options, 'uid', None, None, int)
        self.gid_forced = pop_option(options, 'gid', None, None, int)
        self.umask = pop_option(options, 'umask', 0, 0, int, int_base=8)  # umask is octal, e.g. 222 or 0222
        # Synthesize the default directory item from the forced (or default)
        # ids and the umask-adjusted mode; mtime is in nanoseconds.
        dir_uid = self.uid_forced if self.uid_forced is not None else self.default_uid
        dir_gid = self.gid_forced if self.gid_forced is not None else self.default_gid
        dir_mode = 0o40755 & ~self.umask
        self.default_dir = Item(mode=dir_mode, mtime=int(time.time() * 1e9), uid=dir_uid, gid=dir_gid)
        self._create_filesystem()
        llfuse.init(self, mountpoint, options)
        if not foreground:
            old_id, new_id = daemonize()
            if not isinstance(self.repository_uncached, RemoteRepository):
                # local repo and the locking process' PID just changed, migrate it:
                self.repository_uncached.migrate_lock(old_id, new_id)

        # If the file system crashes, we do not want to umount because in that
        # case the mountpoint suddenly appears to become empty. This can have
        # nasty consequences, imagine the user has e.g. an active rsync mirror
        # job - seeing the mountpoint empty, rsync would delete everything in the
        # mirror.
        umount = False
        try:
            with signal_handler('SIGUSR1', self.sig_info_handler), \
                    signal_handler('SIGINFO', self.sig_info_handler):
                signal = fuse_main()
            # no crash and no signal (or it's ^C and we're in the foreground) -> umount request
            umount = (signal is None or (signal == SIGINT and foreground))
        finally:
            llfuse.close(umount)
def main():
    """Run the llfuse request loop, always releasing the session on exit."""
    try:
        llfuse.main()
    finally:
        # Runs even when main() raises, so the FUSE session is never leaked.
        llfuse.close()
        # --- tail of getattr(): copy timestamps from the backing node ---
        entry.st_mtime = node.mtime
        entry.st_ctime = node.ctime
        # Python 2 debug print of the assembled stat fields.
        print entry.st_nlink, entry.st_mode, entry.st_ino, entry.st_uid, entry.st_gid, entry.st_size, entry.st_atime
        return entry

    def open(self, inode, flags):
        # Count open handles per inode; the inode number doubles as the
        # file handle returned to the kernel.
        print 'Opening file %d with flags %s' % (inode, flags)
        self.inode_open_count[inode] += 1
        return inode

    def access(self, inode, mode, ctx):
        # Permission checks are not enforced: every access is allowed.
        return True


if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog='MyCloud')
    parser.add_argument('mountpoint', help='Root directory of mounted MyCloud')
    args = parser.parse_args()

    operations = MyCloudOperations()
    llfuse.init(operations, args.mountpoint, [b'fsname=MyCloud'])
    try:
        # Single-threaded request loop.
        llfuse.main(single=True)
    except:
        # On any failure, close the session (requesting unmount) and
        # propagate the exception.
        llfuse.close(unmount=True)
        raise
    llfuse.close()