def runTest(self):
    """Check that fsck renumbers inodes that exceed the 32-bit range.

    The inode sequence is artificially pushed past 2**31, data is copied
    in with rsync, and after fsck the inode ids must again fit in 32 bits
    while the file system contents remain identical to the source tree.
    """
    skip_without_rsync()

    source_dir = tempfile.mkdtemp(prefix='s3ql-ref-')
    try:
        populate_dir(source_dir)

        # Create the file system, then fake a very high inode watermark
        self.mkfs()
        conn = Connection(
            get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
        conn.execute('UPDATE sqlite_sequence SET seq=? WHERE name=?',
                     (2**31 + 10, 'inodes'))
        conn.close()

        # Fill the file system with the reference data
        self.mount()
        subprocess.check_call(
            ['rsync', '-aHAX', source_dir + '/', self.mnt_dir + '/'])
        self.umount()

        # The copy must have pushed ids beyond the faked watermark
        conn = Connection(
            get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
        self.assertGreater(
            conn.get_val('SELECT seq FROM sqlite_sequence WHERE name=?',
                         ('inodes',)), 2**31 + 10)
        self.assertGreater(
            conn.get_val('SELECT MAX(id) FROM inodes'), 2**31 + 10)
        conn.close()

        # fsck is expected to renumber all inodes back below 2**31
        self.fsck()

        conn = Connection(
            get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
        self.assertLess(
            conn.get_val('SELECT seq FROM sqlite_sequence WHERE name=?',
                         ('inodes',)), 2**31)
        self.assertLess(conn.get_val('SELECT MAX(id) FROM inodes'), 2**31)
        conn.close()

        # Dry-run rsync again: any difference to the source tree is a failure
        self.mount()
        try:
            out = check_output(
                ['rsync', '-anciHAX', '--delete', '--exclude', '/lost+found',
                 source_dir + '/', self.mnt_dir + '/'],
                universal_newlines=True, stderr=subprocess.STDOUT)
        except CalledProcessError as exc:
            self.fail('rsync failed with ' + exc.output)
        if out:
            self.fail('Copy not equal to original, rsync says:\n' + out)
        self.umount()
    finally:
        shutil.rmtree(source_dir)
def runTest(self):
    """Check that fsck renumbers inodes that exceed the 32-bit range.

    Fakes a high inode sequence, copies data in with rsync, runs fsck,
    and verifies that inode ids are back below 2**31 and that the file
    system contents still match the reference tree.
    """
    # Skip the test when rsync is not available at all
    try:
        # Use a context manager so the /dev/null handle is not leaked
        # (fix: the original `open('/dev/null', 'wb')` was never closed)
        with open('/dev/null', 'wb') as devnull:
            subprocess.call(['rsync', '--version'],
                            stderr=subprocess.STDOUT, stdout=devnull)
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            raise unittest.SkipTest('rsync not installed')
        raise

    ref_dir = tempfile.mkdtemp()
    try:
        populate_dir(ref_dir)

        # Make file system and fake high inode number
        self.mkfs()
        db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
        db.execute('UPDATE sqlite_sequence SET seq=? WHERE name=?',
                   (2 ** 31 + 10, u'inodes'))
        db.close()

        # Copy source data
        self.mount()
        subprocess.check_call(['rsync', '-aHAX', ref_dir + '/',
                               self.mnt_dir + '/'])
        self.umount()

        # Check that inode watermark is high
        db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
        self.assertGreater(
            db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?',
                       (u'inodes',)), 2 ** 31 + 10)
        self.assertGreater(db.get_val('SELECT MAX(id) FROM inodes'),
                           2 ** 31 + 10)
        db.close()

        # Renumber inodes
        self.fsck()

        # Check if renumbering was done
        db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
        self.assertLess(
            db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?',
                       (u'inodes',)), 2 ** 31)
        self.assertLess(db.get_val('SELECT MAX(id) FROM inodes'), 2 ** 31)
        db.close()

        # Compare with a dry-run rsync; any difference means failure
        self.mount()
        rsync = subprocess.Popen(
            ['rsync', '-anciHAX', '--delete', '--exclude', '/lost+found',
             ref_dir + '/', self.mnt_dir + '/'],
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
            # fix: decode output to str, otherwise the str + bytes
            # concatenations in the fail messages raise TypeError on Py3
            universal_newlines=True)
        out = rsync.communicate()[0]
        # fix: check the exit status first -- a failing rsync usually
        # produces diagnostics on stdout too, which the original code
        # misreported as "copy not equal"; the old elif branch could
        # only ever fire with empty output.
        if rsync.returncode != 0:
            self.fail('rsync failed with ' + out)
        if out:
            self.fail('Copy not equal to original, rsync says:\n' + out)
        self.umount()
    finally:
        shutil.rmtree(ref_dir)
def test(self):
    """Verify that fsck brings oversized inode numbers back below 2**31.

    Pushes the inode sequence past the 32-bit boundary, populates the
    file system via rsync, runs fsck, and then confirms both the
    renumbering and that the contents still match the reference tree.
    """
    skip_without_rsync()

    src_dir = tempfile.mkdtemp(prefix='s3ql-ref-')
    try:
        populate_dir(src_dir)

        # Create the file system and fake a high inode watermark
        self.mkfs()
        db = Connection(
            get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
        db.execute('UPDATE sqlite_sequence SET seq=? WHERE name=?',
                   (2 ** 31 + 10, 'inodes'))
        db.close()

        # Copy the reference data into the mounted file system
        self.mount()
        subprocess.check_call(
            ['rsync', '-aHAX', src_dir + '/', self.mnt_dir + '/'])
        self.umount()

        # The copy must have driven inode ids beyond the watermark
        db = Connection(
            get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
        seq_high = db.get_val(
            'SELECT seq FROM sqlite_sequence WHERE name=?', ('inodes',))
        max_high = db.get_val('SELECT MAX(id) FROM inodes')
        assert seq_high > 2 ** 31 + 10
        assert max_high > 2 ** 31 + 10
        db.close()

        # fsck performs the renumbering
        self.fsck()

        # Afterwards everything must fit into 32 bits again
        db = Connection(
            get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
        seq_low = db.get_val(
            'SELECT seq FROM sqlite_sequence WHERE name=?', ('inodes',))
        max_low = db.get_val('SELECT MAX(id) FROM inodes')
        assert seq_low < 2 ** 31
        assert max_low < 2 ** 31
        db.close()

        # Dry-run rsync: any reported difference fails the test
        self.mount()
        try:
            out = check_output(
                ['rsync', '-anciHAX', '--delete', '--exclude', '/lost+found',
                 src_dir + '/', self.mnt_dir + '/'],
                universal_newlines=True, stderr=subprocess.STDOUT)
        except CalledProcessError as exc:
            pytest.fail('rsync failed with ' + exc.output)
        if out:
            pytest.fail('Copy not equal to original, rsync says:\n' + out)
        self.umount()
    finally:
        shutil.rmtree(src_dir)
def main(args=None):
    """Create an anonymized copy of the local metadata database.

    Reads the cached metadata for the file system given on the command
    line, replaces all file names, symlink targets and extended-attribute
    values by their MD5 digests, and dumps the scrambled database into
    *DBNAME* in the current directory.

    :param args: command line arguments (defaults to ``sys.argv[1:]``)
    :raises QuietError: if no local metadata exists, the file system
        revision does not match, or *DBNAME* already exists.
    """
    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)

    # Check for cached metadata
    cachepath = get_backend_cachedir(options.storage_url, options.cachedir)
    if not os.path.exists(cachepath + '.params'):
        raise QuietError("No local metadata found.")
    # NOTE: pickle is only safe because the params file is a local cache
    # written by s3ql itself; never use this on untrusted input.
    with open(cachepath + '.params', 'rb') as fh:
        param = pickle.load(fh)

    # Check revision
    if param['revision'] < CURRENT_FS_REV:
        raise QuietError('File system revision too old.')
    elif param['revision'] > CURRENT_FS_REV:
        raise QuietError('File system revision too new.')

    if os.path.exists(DBNAME):
        raise QuietError('%s exists, aborting.' % DBNAME)

    # Work on a copy so the original cached database stays untouched.
    # The NamedTemporaryFile must stay open while the Connection uses it.
    log.info('Copying database...')
    dst = tempfile.NamedTemporaryFile()
    with open(cachepath + '.db', 'rb') as src:
        shutil.copyfileobj(src, dst)
    dst.flush()
    db = Connection(dst.name)

    log.info('Scrambling...')

    # fix (PEP8 E731): named function instead of assigning a lambda
    def md5(x):
        return hashlib.md5(x).hexdigest()

    # fix: materialize each result set before updating -- with SQLite,
    # modifying rows of a table while a SELECT cursor on the same table
    # is still being stepped has undefined visibility semantics.
    for (id_, name) in list(db.query('SELECT id, name FROM names')):
        db.execute('UPDATE names SET name=? WHERE id=?',
                   (md5(name), id_))

    for (id_, name) in list(db.query('SELECT inode, target FROM symlink_targets')):
        db.execute('UPDATE symlink_targets SET target=? WHERE inode=?',
                   (md5(name), id_))

    for (id_, name) in list(db.query('SELECT rowid, value FROM ext_attributes')):
        db.execute('UPDATE ext_attributes SET value=? WHERE rowid=?',
                   (md5(name), id_))

    log.info('Saving...')
    with open(DBNAME, 'wb+') as fh:
        dump_metadata(db, fh)
def main(args=None):
    """Produce an anonymized dump of the locally cached metadata.

    Loads the cached metadata for the given storage URL, overwrites every
    file name, symlink target and extended-attribute value with its MD5
    hex digest, and writes the scrambled database to *DBNAME*.

    :param args: command line arguments (defaults to ``sys.argv[1:]``)
    :raises QuietError: when no local metadata is present, the revision
        is incompatible, or *DBNAME* already exists.
    """
    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)

    # Local metadata cache must exist
    cachepath = get_backend_cachedir(options.storage_url, options.cachedir)
    if not os.path.exists(cachepath + '.params'):
        raise QuietError("No local metadata found.")
    param = load_params(cachepath)

    # Refuse to touch metadata from a different file system revision
    revision = param['revision']
    if revision < CURRENT_FS_REV:
        raise QuietError('File system revision too old.')
    if revision > CURRENT_FS_REV:
        raise QuietError('File system revision too new.')

    if os.path.exists(DBNAME):
        raise QuietError('%s exists, aborting.' % DBNAME)

    # Operate on a throwaway copy; the temp file must remain open for as
    # long as the Connection uses it.
    log.info('Copying database...')
    dst = tempfile.NamedTemporaryFile()
    with open(cachepath + '.db', 'rb') as src:
        shutil.copyfileobj(src, dst)
    dst.flush()
    db = Connection(dst.name)

    log.info('Scrambling...')

    def scrambled(value):
        # One-way hash so the original strings cannot be recovered
        return hashlib.md5(value).hexdigest()

    # (table, key column, column to scramble) for every sensitive field
    for (table, id_col, val_col) in (('names', 'id', 'name'),
                                     ('symlink_targets', 'inode', 'target'),
                                     ('ext_attributes', 'rowid', 'value')):
        for (row_id, value) in db.query(
                'SELECT %s, %s FROM %s' % (id_col, val_col, table)):
            db.execute('UPDATE %s SET %s=? WHERE %s=?' % (table, val_col, id_col),
                       (scrambled(value), row_id))

    log.info('Saving...')
    with open(DBNAME, 'wb+') as fh:
        dump_metadata(db, fh)