def run(self):
    log.debug('MetadataUploadThread: start')

    while not self.quit:
        self.event.wait(self.interval)
        self.event.clear()

        if self.quit:
            break

        with llfuse.lock:
            if self.quit:
                break
            new_mtime = os.stat(self.db.file).st_mtime
            if self.db_mtime == new_mtime:
                log.info('File system unchanged, not uploading metadata.')
                continue

            # We dump to a file first, so that we don't hold the
            # lock for quite so long.
            log.info('Saving metadata...')
            fh = tempfile.TemporaryFile()
            dump_metadata(fh, self.db)

        with self.bucket_pool() as bucket:
            seq_no = get_seq_no(bucket)
            if seq_no != self.param['seq_no']:
                log.error('Remote metadata is newer than local (%d vs %d), '
                          'refusing to overwrite!', seq_no, self.param['seq_no'])
                fh.close()
                continue

            log.info("Compressing & uploading metadata..")
            cycle_metadata(bucket)
            fh.seek(0)
            self.param['last-modified'] = time.time() - time.timezone

            # Temporarily decrease sequence no, this is not the final upload
            self.param['seq_no'] -= 1
            with bucket.open_write("s3ql_metadata", self.param) as obj_fh:
                shutil.copyfileobj(fh, obj_fh)
            self.param['seq_no'] += 1

            fh.close()
            self.db_mtime = new_mtime

    log.debug('MetadataUploadThread: end')
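# --- Illustrative sketch (not part of the original source) -----------------
# The run() loop above waits on self.event and re-checks self.quit before and
# after taking the lock, so the matching stop() method that the mount code
# calls at shutdown presumably looks roughly like this: set the flag first,
# then wake the thread so it does not sleep out the remaining interval.
# This is an assumption, not the actual S3QL implementation.
def stop(self):
    self.quit = True
    self.event.set()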
def main(args=None):

    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)

    # Check if fs is mounted on this computer
    # This is not foolproof but should prevent common mistakes
    match = options.storage_url + ' /'
    with open('/proc/mounts', 'r') as fh:
        for line in fh:
            if line.startswith(match):
                raise QuietError('Cannot check a mounted file system.')

    bucket = get_bucket(options)
    cachepath = get_bucket_cachedir(options.storage_url, options.cachedir)
    seq_no = get_seq_no(bucket)
    param_remote = bucket.lookup('s3ql_metadata')
    db = None

    if os.path.exists(cachepath + '.params'):
        assert os.path.exists(cachepath + '.db')
        param = pickle.load(open(cachepath + '.params', 'rb'))
        if param['seq_no'] < seq_no:
            log.info('Ignoring locally cached metadata (outdated).')
            param = bucket.lookup('s3ql_metadata')
        else:
            log.info('Using cached metadata.')
            db = Connection(cachepath + '.db')
            assert not os.path.exists(cachepath + '-cache') or param['needs_fsck']

        if param_remote['seq_no'] != param['seq_no']:
            log.warn('Remote metadata is outdated.')
            param['needs_fsck'] = True

    else:
        param = param_remote
        assert not os.path.exists(cachepath + '-cache')
        # .db might exist if mount.s3ql is killed at exactly the right instant
        # and should just be ignored.

    # Check revision
    if param['revision'] < CURRENT_FS_REV:
        raise QuietError('File system revision too old, please run `s3qladm upgrade` first.')
    elif param['revision'] > CURRENT_FS_REV:
        raise QuietError('File system revision too new, please update your '
                         'S3QL installation.')

    if param['seq_no'] < seq_no:
        if bucket.is_get_consistent():
            print(textwrap.fill(textwrap.dedent('''\
                Up-to-date metadata is not available. Probably the file system has not
                been properly unmounted and you should try to run fsck on the computer
                where the file system has been mounted most recently.
                ''')))
        else:
            print(textwrap.fill(textwrap.dedent('''\
                Up-to-date metadata is not available. Either the file system has not
                been unmounted cleanly or the data has not yet propagated through
                the backend. In the latter case, waiting for a while should fix the
                problem; in the former case you should try to run fsck on the
                computer where the file system has been mounted most recently.
                ''')))
        print('Enter "continue" to use the outdated data anyway:', '> ', sep='\n', end='')
        if options.batch:
            raise QuietError('(in batch mode, exiting)')
        if sys.stdin.readline().strip() != 'continue':
            raise QuietError()

        param['seq_no'] = seq_no
        param['needs_fsck'] = True

    if (not param['needs_fsck']
        and ((time.time() - time.timezone) - param['last_fsck'])
             < 60 * 60 * 24 * 31):  # last check was less than one month ago
        if options.force:
            log.info('File system seems clean, checking anyway.')
        else:
            log.info('File system is marked as clean. Use --force to force checking.')
            return

    # If using local metadata, check consistency
    if db:
        log.info('Checking DB integrity...')
        try:
            # get_list may raise CorruptError itself
            res = db.get_list('PRAGMA integrity_check(20)')
            if res[0][0] != u'ok':
                log.error('\n'.join(x[0] for x in res))
                raise apsw.CorruptError()
        except apsw.CorruptError:
            raise QuietError('Local metadata is corrupted. Remove or repair the following '
                             'files manually and re-run fsck:\n'
                             + cachepath + '.db (corrupted)\n'
                             + cachepath + '.params (intact)')
    else:
        log.info("Downloading & uncompressing metadata...")
        os.close(os.open(cachepath + '.db.tmp',
                         os.O_RDWR | os.O_CREAT | os.O_TRUNC,
                         stat.S_IRUSR | stat.S_IWUSR))
        db = Connection(cachepath + '.db.tmp', fast_mode=True)
        with bucket.open_read("s3ql_metadata") as fh:
            restore_metadata(fh, db)
        db.close()
        os.rename(cachepath + '.db.tmp', cachepath + '.db')
        db = Connection(cachepath + '.db')

    # Increase metadata sequence no
    param['seq_no'] += 1
    param['needs_fsck'] = True
    bucket['s3ql_seq_no_%d' % param['seq_no']] = 'Empty'
    pickle.dump(param, open(cachepath + '.params', 'wb'), 2)

    fsck = Fsck(cachepath + '-cache', bucket, param, db)
    fsck.check()

    if fsck.uncorrectable_errors:
        raise QuietError("Uncorrectable errors found, aborting.")

    if os.path.exists(cachepath + '-cache'):
        os.rmdir(cachepath + '-cache')

    log.info('Saving metadata...')
    fh = tempfile.TemporaryFile()
    dump_metadata(fh, db)

    log.info("Compressing & uploading metadata..")
    cycle_metadata(bucket)
    fh.seek(0)
    param['needs_fsck'] = False
    param['last_fsck'] = time.time() - time.timezone
    param['last-modified'] = time.time() - time.timezone
    with bucket.open_write("s3ql_metadata", param) as dst:
        fh.seek(0)
        shutil.copyfileobj(fh, dst)
    fh.close()
    pickle.dump(param, open(cachepath + '.params', 'wb'), 2)

    db.execute('ANALYZE')
    db.execute('VACUUM')
    db.close()
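# --- Illustrative sketch (not part of the original source) -----------------
# get_seq_no() is called above but defined elsewhere in S3QL. Judging from the
# listing logic in upgrade() below, it presumably derives the current sequence
# number from the numeric suffixes of the 's3ql_seq_no_*' marker objects in
# the bucket, roughly like this (the exact error handling is an assumption):
def get_seq_no_sketch(bucket):
    seq_nos = [int(x[len('s3ql_seq_no_'):]) for x in bucket.list('s3ql_seq_no_')]
    if not seq_nos:
        # The real helper most likely reports a missing or damaged file system here.
        raise QuietError('No s3ql_seq_no_* objects found - is this really an S3QL bucket?')
    return max(seq_nos)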
def upgrade(bucket):
    '''Upgrade file system to newest revision'''

    # Access to protected member
    #pylint: disable=W0212

    log.info('Getting file system parameters..')
    seq_nos = [ int(x[len('s3ql_seq_no_'):]) for x in bucket.list('s3ql_seq_no_') ]
    if not seq_nos:
        raise QuietError(textwrap.dedent('''
            File system revision too old to upgrade!

            You need to use an older S3QL version to upgrade to a more recent
            revision before you can use this version to upgrade to the newest
            revision.
            '''))
    seq_no = max(seq_nos)

    param = bucket.lookup('s3ql_metadata')

    # Check for unclean shutdown
    if param['seq_no'] < seq_no:
        if bucket.is_get_consistent():
            raise QuietError(textwrap.fill(textwrap.dedent('''\
                It appears that the file system is still mounted somewhere else. If this is
                not the case, the file system may not have been unmounted cleanly and you
                should try to run fsck on the computer where the file system has been
                mounted most recently.
                ''')))
        else:
            raise QuietError(textwrap.fill(textwrap.dedent('''\
                It appears that the file system is still mounted somewhere else. If this is
                not the case, the file system may not have been unmounted cleanly or the
                data from the most-recent mount may not yet have propagated through the
                backend. In the latter case, waiting for a while should fix the problem;
                in the former case you should try to run fsck on the computer where the
                file system has been mounted most recently.
                ''')))

    # Check that the fs itself is clean
    if param['needs_fsck']:
        raise QuietError("File system damaged, run fsck!")

    # Check revision
    if param['revision'] < CURRENT_FS_REV - 1:
        raise QuietError(textwrap.dedent('''
            File system revision too old to upgrade!

            You need to use an older S3QL version to upgrade to a more recent
            revision before you can use this version to upgrade to the newest
            revision.
            '''))
    elif param['revision'] >= CURRENT_FS_REV:
        print('File system already at most-recent revision')
        return

    print(textwrap.dedent('''
        I am about to update the file system to the newest revision.
        You will not be able to access the file system with any older version
        of S3QL after this operation.

        You should make very sure that this command is not interrupted and
        that no one else tries to mount, fsck or upgrade the file system at
        the same time.
        '''))

    print('Please enter "yes" to continue.', '> ', sep='\n', end='')
    if sys.stdin.readline().strip().lower() != 'yes':
        raise QuietError()

    log.info('Upgrading from revision %d to %d...', CURRENT_FS_REV - 1, CURRENT_FS_REV)

    if 's3ql_hash_check_status' not in bucket:
        if (isinstance(bucket, LegacyLocalBucket)
            or (isinstance(bucket, BetterBucket)
                and isinstance(bucket.bucket, LegacyLocalBucket))):
            if isinstance(bucket, LegacyLocalBucket):
                bucketpath = bucket.name
            else:
                bucketpath = bucket.bucket.name
            for (path, _, filenames) in os.walk(bucketpath, topdown=True):
                for name in filenames:
                    if not name.endswith('.meta'):
                        continue
                    basename = os.path.splitext(name)[0]
                    if '=00' in basename:
                        raise RuntimeError("No, seriously, you tried to break things, didn't you?")
                    with open(os.path.join(path, name), 'r+b') as dst:
                        dst.seek(0, os.SEEK_END)
                        with open(os.path.join(path, basename + '.dat'), 'rb') as src:
                            shutil.copyfileobj(src, dst)
                    basename = basename.replace('#', '=23')
                    os.rename(os.path.join(path, name),
                              os.path.join(path, basename))
                    os.unlink(os.path.join(path, basename + '.dat'))
            if isinstance(bucket, LegacyLocalBucket):
                bucket = LocalBucket(bucket.name, None, None)
            else:
                bucket.bucket = LocalBucket(bucket.bucket.name, None, None)

        # Download metadata
        log.info("Downloading & uncompressing metadata...")
        dbfile = tempfile.NamedTemporaryFile()
        with tempfile.TemporaryFile() as tmp:
            with bucket.open_read("s3ql_metadata") as fh:
                shutil.copyfileobj(fh, tmp)
            db = Connection(dbfile.name, fast_mode=True)
            tmp.seek(0)
            restore_legacy_metadata(tmp, db)

        # Increase metadata sequence no
        param['seq_no'] += 1
        bucket['s3ql_seq_no_%d' % param['seq_no']] = 'Empty'
        for i in seq_nos:
            if i < param['seq_no'] - 5:
                del bucket['s3ql_seq_no_%d' % i ]

        log.info("Uploading database..")
        cycle_metadata(bucket)
        param['last-modified'] = time.time() - time.timezone
        with bucket.open_write("s3ql_metadata", param) as fh:
            dump_metadata(fh, db)
    else:
        log.info("Downloading & uncompressing metadata...")
        dbfile = tempfile.NamedTemporaryFile()
        with tempfile.TemporaryFile() as tmp:
            with bucket.open_read("s3ql_metadata") as fh:
                shutil.copyfileobj(fh, tmp)
            db = Connection(dbfile.name, fast_mode=True)
            tmp.seek(0)
            restore_metadata(tmp, db)

    print(textwrap.dedent('''
        The following process may take a long time, but can be interrupted
        with Ctrl-C and resumed from this point by calling `s3qladm upgrade`
        again. Please see Changes.txt for why this is necessary.
        '''))

    if 's3ql_hash_check_status' not in bucket:
        log.info("Starting hash verification..")
        start_obj = 0
    else:
        start_obj = int(bucket['s3ql_hash_check_status'])
        log.info("Resuming hash verification with object %d..", start_obj)

    try:
        total = db.get_val('SELECT COUNT(id) FROM objects')
        i = 0
        for (obj_id, hash_) in db.query('SELECT obj_id, hash FROM blocks JOIN objects '
                                        'ON obj_id == objects.id WHERE obj_id > ? '
                                        'ORDER BY obj_id ASC', (start_obj,)):
            if i % 100 == 0:
                log.info(' ..checked %d/%d objects..', i, total)

            sha = hashlib.sha256()
            with bucket.open_read("s3ql_data_%d" % obj_id) as fh:
                while True:
                    buf = fh.read(128 * 1024)
                    if not buf:
                        break
                    sha.update(buf)

            if sha.digest() != hash_:
                log.warn('Object %d corrupted! Deleting..', obj_id)
                bucket.delete('s3ql_data_%d' % obj_id)
            i += 1
    except KeyboardInterrupt:
        log.info("Storing verification status...")
        bucket['s3ql_hash_check_status'] = '%d' % obj_id
        raise QuietError('Aborting..')

    log.info('Running fsck...')
    fsck = Fsck(tempfile.mkdtemp(), bucket, param, db)
    fsck.check()

    if fsck.uncorrectable_errors:
        raise QuietError("Uncorrectable errors found, aborting.")

    param['revision'] = CURRENT_FS_REV
    param['seq_no'] += 1
    bucket['s3ql_seq_no_%d' % param['seq_no']] = 'Empty'

    log.info("Uploading database..")
    cycle_metadata(bucket)
    param['last-modified'] = time.time() - time.timezone
    with bucket.open_write("s3ql_metadata", param) as fh:
        dump_metadata(fh, db)
def main(args=None):

    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)

    plain_bucket = get_bucket(options, plain=True)

    if 's3ql_metadata' in plain_bucket:
        if not options.force:
            raise QuietError("Found existing file system! Use --force to overwrite")

        log.info('Purging existing file system data..')
        plain_bucket.clear()
        if not plain_bucket.is_get_consistent():
            log.info('Please note that the new file system may appear inconsistent\n'
                     'for a while until the removals have propagated through the backend.')

    if not options.plain:
        if sys.stdin.isatty():
            wrap_pw = getpass("Enter encryption password: ")
            if wrap_pw != getpass("Confirm encryption password: "):
                raise QuietError("Passwords don't match.")
        else:
            wrap_pw = sys.stdin.readline().rstrip()

        # Generate data encryption passphrase
        log.info('Generating random encryption key...')
        fh = open('/dev/urandom', "rb", 0)  # No buffering
        data_pw = fh.read(32)
        fh.close()

        # Store the random data key wrapped with the user's password; the
        # bucket used for actual data is then keyed with the random key.
        bucket = BetterBucket(wrap_pw, 'bzip2', plain_bucket)
        bucket['s3ql_passphrase'] = data_pw
    else:
        data_pw = None

    bucket = BetterBucket(data_pw, 'bzip2', plain_bucket)

    # Setup database
    cachepath = get_bucket_cachedir(options.storage_url, options.cachedir)

    # There can't be a corresponding bucket, so we can safely delete
    # these files.
    if os.path.exists(cachepath + '.db'):
        os.unlink(cachepath + '.db')
    if os.path.exists(cachepath + '-cache'):
        shutil.rmtree(cachepath + '-cache')

    log.info('Creating metadata tables...')
    db = Connection(cachepath + '.db')
    create_tables(db)
    init_tables(db)

    param = dict()
    param['revision'] = CURRENT_FS_REV
    param['seq_no'] = 1
    param['label'] = options.label
    param['blocksize'] = options.blocksize * 1024
    param['needs_fsck'] = False
    param['last_fsck'] = time.time() - time.timezone
    param['last-modified'] = time.time() - time.timezone

    # This indicates that the convert_legacy_metadata() stuff
    # in BetterBucket is not required for this file system.
    param['bucket_revision'] = 1

    bucket.store('s3ql_seq_no_%d' % param['seq_no'], 'Empty')

    log.info('Uploading metadata...')
    with bucket.open_write('s3ql_metadata', param) as fh:
        dump_metadata(fh, db)
    pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
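# --- Illustrative sketch (not part of the original source) -----------------
# get_bucket_cachedir() is called in several of the functions above but
# defined elsewhere in S3QL. The escaping scheme below is an assumption; the
# only point is that each storage URL maps to a stable per-bucket path prefix
# under --cachedir, to which suffixes such as '.db', '.params' and '-cache'
# are appended.
def get_bucket_cachedir_sketch(storage_url, cachedir):
    safe = ''.join(c if c.isalnum() else '_' for c in storage_url)  # hypothetical escaping
    return os.path.join(cachedir, safe)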
def main(args=None):
    '''Mount S3QL file system'''

    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)

    # Save handler so that we can remove it when daemonizing
    stdout_log_handler = setup_logging(options)

    if options.threads is None:
        options.threads = determine_threads(options)

    if not os.path.exists(options.mountpoint):
        raise QuietError('Mountpoint does not exist.')

    if options.profile:
        import cProfile
        import pstats
        prof = cProfile.Profile()

    bucket_factory = get_bucket_factory(options)
    bucket_pool = BucketPool(bucket_factory)

    # Get paths
    cachepath = get_bucket_cachedir(options.storage_url, options.cachedir)

    # Retrieve metadata
    with bucket_pool() as bucket:
        (param, db) = get_metadata(bucket, cachepath, options.readonly)

    if options.nfs:
        log.info('Creating NFS indices...')
        # NFS may try to look up '..', so we have to speed up this kind of query
        db.execute('CREATE INDEX IF NOT EXISTS ix_contents_inode ON contents(inode)')

        # Since we do not support generation numbers, we have to keep the
        # likelihood of reusing a just-deleted inode low
        inode_cache.RANDOMIZE_INODES = True
    else:
        db.execute('DROP INDEX IF EXISTS ix_contents_inode')

    metadata_upload_thread = MetadataUploadThread(bucket_pool, param, db,
                                                  options.metadata_upload_interval)
    metadata_download_thread = MetadataDownloadThread(bucket_pool, param, cachepath,
                                                      options.metadata_download_interval)
    block_cache = BlockCache(bucket_pool, db, cachepath + '-cache',
                             options.cachesize * 1024, options.max_cache_entries)
    commit_thread = CommitThread(block_cache)
    operations = fs.Operations(block_cache, db, blocksize=param['blocksize'],
                               upload_event=metadata_upload_thread.event)

    log.info('Mounting filesystem...')
    llfuse.init(operations, options.mountpoint, get_fuse_opts(options))

    if not options.fg:
        if stdout_log_handler:
            logging.getLogger().removeHandler(stdout_log_handler)
        daemonize(options.cachedir)

    exc_info = setup_exchook()

    # After we start threads, we must be sure to terminate them
    # or the process will hang
    try:
        block_cache.init(options.threads)
        metadata_upload_thread.start()
        metadata_download_thread.start()
        commit_thread.start()

        if options.upstart:
            os.kill(os.getpid(), signal.SIGSTOP)
        if options.profile:
            prof.runcall(llfuse.main, options.single)
        else:
            llfuse.main(options.single)

        log.info("FUSE main loop terminated.")

    except:
        log.info("Caught exception in main loop, unmounting file system...")

        # Tell finally handler that there already is an exception
        if not exc_info:
            exc_info = sys.exc_info()

        # We do *not* unmount on exception. Why? E.g. if someone is mirroring the
        # mountpoint, and it suddenly becomes empty, all the mirrored data will be
        # deleted. However, it's crucial to still call llfuse.close, so that
        # Operations.destroy() can flush the inode cache.
        with llfuse.lock:
            llfuse.close(unmount=False)

        raise

    # Terminate threads
    finally:
        log.debug("Waiting for background threads...")
        for (op, with_lock) in ((metadata_upload_thread.stop, False),
                                (commit_thread.stop, False),
                                (block_cache.destroy, True),
                                (metadata_upload_thread.join, False),
                                (metadata_download_thread.join, False),
                                (commit_thread.join, False)):
            try:
                if with_lock:
                    with llfuse.lock:
                        op()
                else:
                    op()
            except:
                # We just live with the race cond here
                if not exc_info:
                    exc_info = sys.exc_info()
                else:
                    log.exception("Exception during cleanup:")

        log.debug("All background threads terminated.")

    # Re-raise if main loop terminated due to exception in other thread
    # or during cleanup
    if exc_info:
        raise exc_info[0], exc_info[1], exc_info[2]

    # At this point, there should be no other threads left

    # Unmount
    log.info("Unmounting file system.")
    with llfuse.lock:
        llfuse.close()

    # Do not update .params yet, dump_metadata() may fail if the database is
    # corrupted, in which case we want to force an fsck.
    if not options.readonly:
        with bucket_pool() as bucket:
            seq_no = get_seq_no(bucket)
            if metadata_upload_thread.db_mtime == os.stat(cachepath + '.db').st_mtime:
                log.info('File system unchanged, not uploading metadata.')
                del bucket['s3ql_seq_no_%d' % param['seq_no']]
                param['seq_no'] -= 1
                pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
            elif seq_no == param['seq_no']:
                log.info('Uploading metadata...')
                cycle_metadata(bucket)
                param['last-modified'] = time.time() - time.timezone
                with tempfile.TemporaryFile() as tmp:
                    dump_metadata(tmp, db)
                    tmp.seek(0)
                    with bucket.open_write('s3ql_metadata', param) as fh:
                        shutil.copyfileobj(tmp, fh)
                pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
            else:
                log.error('Remote metadata is newer than local (%d vs %d), '
                          'refusing to overwrite!', seq_no, param['seq_no'])
                log.error('The locally cached metadata will be *lost* the next time the file system '
                          'is mounted or checked and has therefore been backed up.')
                for name in (cachepath + '.params', cachepath + '.db'):
                    for i in reversed(range(4)):
                        if os.path.exists(name + '.%d' % i):
                            os.rename(name + '.%d' % i, name + '.%d' % (i + 1))
                    os.rename(name, name + '.0')

    db.execute('ANALYZE')
    db.execute('VACUUM')
    db.close()

    if options.profile:
        tmp = tempfile.NamedTemporaryFile()
        prof.dump_stats(tmp.name)
        fh = open('s3ql_profile.txt', 'w')
        p = pstats.Stats(tmp.name, stream=fh)
        tmp.close()
        p.strip_dirs()
        p.sort_stats('cumulative')
        p.print_stats(50)
        p.sort_stats('time')
        p.print_stats(50)
        fh.close()