Example #1
def db():
    dbfile = tempfile.NamedTemporaryFile()
    db = Connection(dbfile.name)
    create_tables(db)
    init_tables(db)
    try:
        yield db
    finally:
        db.close()
        dbfile.close()
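Setup runs before the yield and cleanup after it; a minimal usage sketch, assuming this generator is registered as a pytest fixture (the decorator, the imports, and the Connection/create_tables/init_tables helpers live outside this excerpt):

import pytest

# Registering the generator above as a fixture; normally the decorator
# would sit directly on def db().
db = pytest.fixture(db)

def test_tables_exist(db):
    # pytest injects the fixture by name; get_val() is assumed to be a
    # helper on the Connection object and may differ in the real API.
    assert db.get_val('SELECT COUNT(*) FROM sqlite_master') > 0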
Example #2
    def setUp(self):
        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)

        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)
        self.cache = inode_cache.InodeCache(self.db, 0)
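The comment above promises a tearDown() that is not part of this excerpt; a plausible counterpart, assuming only the attributes created in setUp() (the real method may do more):

    def tearDown(self):
        self.cache.destroy()           # assumes InodeCache exposes destroy()
        self.db.close()
        self.dbfile.close()
        os.unlink(self.dbfile.name)    # the manual unlink promised above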
Example #3
    def setUp(self):
        self.tmpfh1 = tempfile.NamedTemporaryFile()
        self.tmpfh2 = tempfile.NamedTemporaryFile()
        self.src = Connection(self.tmpfh1.name)
        self.dst = Connection(self.tmpfh2.name)
        self.fh = tempfile.TemporaryFile()

        # Disable exclusive locking for all tests
        self.src.execute('PRAGMA locking_mode = NORMAL')
        self.dst.execute('PRAGMA locking_mode = NORMAL')

        self.create_table(self.src)
        self.create_table(self.dst)
Example #4
    def setUp(self):
        self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        self.backend = local.Backend('local://' + self.backend_dir, None, None)
        self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.max_obj_size = 1024

        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        self.fsck = Fsck(self.cachedir, self.backend,
                         {'max_obj_size': self.max_obj_size}, self.db)
        self.fsck.expect_errors = True
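No tearDown() is shown for this setup; a minimal sketch that releases everything created above, assuming nothing else holds the resources:

    def tearDown(self):
        self.db.close()
        self.dbfile.close()            # NamedTemporaryFile deletes itself on close
        shutil.rmtree(self.cachedir)
        shutil.rmtree(self.backend_dir)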
Example #5
def main(args=None):

    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)

    # Check for cached metadata
    cachepath = get_backend_cachedir(options.storage_url, options.cachedir)
    if not os.path.exists(cachepath + '.params'):
        raise QuietError("No local metadata found.")

    with open(cachepath + '.params', 'rb') as fh:
        param = pickle.load(fh)

    # Check revision
    if param['revision'] < CURRENT_FS_REV:
        raise QuietError('File system revision too old.')
    elif param['revision'] > CURRENT_FS_REV:
        raise QuietError('File system revision too new.')

    if os.path.exists(DBNAME):
        raise QuietError('%s exists, aborting.' % DBNAME)

    log.info('Copying database...')
    dst = tempfile.NamedTemporaryFile()
    with open(cachepath + '.db', 'rb') as src:
        shutil.copyfileobj(src, dst)
    dst.flush()
    db = Connection(dst.name)

    log.info('Scrambling...')
    md5 = lambda x: hashlib.md5(x).hexdigest()
    for (id_, name) in db.query('SELECT id, name FROM names'):
        db.execute('UPDATE names SET name=? WHERE id=?',
                   (md5(name), id_))

    for (id_, name) in db.query('SELECT inode, target FROM symlink_targets'):
        db.execute('UPDATE symlink_targets SET target=? WHERE inode=?',
                   (md5(name), id_))

    for (id_, name) in db.query('SELECT rowid, value FROM ext_attributes'):
        db.execute('UPDATE ext_attributes SET value=? WHERE rowid=?',
                   (md5(name), id_))

    log.info('Saving...')
    with open(DBNAME, 'wb+') as fh:
        dump_metadata(db, fh)
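A usage sketch for this entry point: since main() already falls back to sys.argv[1:] when called without arguments, a standalone script built around it would simply end with:

if __name__ == '__main__':
    main()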
Example #6
    def setUp(self):

        self.backend_dir = tempfile.mkdtemp()
        self.backend_pool = BackendPool(lambda: local.Backend('local://' + self.backend_dir,
                                                              None, None))

        self.cachedir = tempfile.mkdtemp()
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name. 
        # Therefore, we unlink the file manually in tearDown() 
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Create an inode we can work with
        self.inode = 42
        self.db.execute("INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount,size) "
                        "VALUES (?,?,?,?,?,?,?,?,?)",
                        (self.inode, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                         | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
                         os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1, 32))

        self.cache = BlockCache(self.backend_pool, self.db, self.cachedir + "/cache",
                                self.max_obj_size * 100)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()
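The matching tearDown() is not included here; Example #23 below shows the complete pattern for a very similar setup, which applied to this class would look roughly like:

    def tearDown(self):
        llfuse.lock.release()          # undo the acquire() at the end of setUp()
        self.cache.destroy()
        shutil.rmtree(self.cachedir)
        shutil.rmtree(self.backend_dir)
        os.unlink(self.dbfile.name)    # manual unlink, see the comment in setUp()
        self.dbfile.close()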
Example #7
    def setUp(self):
        self.backend_dir = tempfile.mkdtemp()
        plain_backend = local.Backend('local://' + self.backend_dir, None, None)
        self.backend_pool = BackendPool(lambda: BetterBackend(b'schwubl', 'lzma', plain_backend))
        self.backend = self.backend_pool.pop_conn()
        self.cachedir = tempfile.mkdtemp()
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name. 
        # Therefore, we unlink the file manually in tearDown() 
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()

        self.block_cache = BlockCache(self.backend_pool, self.db, self.cachedir + "/cache",
                                      self.max_obj_size * 5)
        self.server = fs.Operations(self.block_cache, self.db, self.max_obj_size,
                                    InodeCache(self.db, 0))

        self.server.init()

        # Keep track of unused filenames
        self.name_cnt = 0
Example #8
    def setUp(self):

        self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        self.backend_pool = BackendPool(lambda: local.Backend(
            Namespace(storage_url='local://' + self.backend_dir)))

        self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Create an inode we can work with
        self.inode = 42
        now_ns = time_ns()
        self.db.execute(
            "INSERT INTO inodes (id,mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?,?)",
            (self.inode,
             stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
             | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
             os.getuid(), os.getgid(), now_ns, now_ns, now_ns, 1, 32))

        cache = BlockCache(self.backend_pool, self.db,
                           self.cachedir + "/cache", self.max_obj_size * 100)
        self.cache = cache

        # Monkeypatch around the need for removal and upload threads
        cache.to_remove = DummyQueue(cache)

        class DummyDistributor:
            def put(self, arg, timeout=None):
                cache._do_upload(*arg)
                return True

        cache.to_upload = DummyDistributor()

        # Tested methods assume that they are called from
        # file system request handler
        s3ql.block_cache.lock = MockLock()
        s3ql.block_cache.lock_released = MockLock()
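MockLock is not defined in any of these excerpts; since it merely stands in for the llfuse-style lock that the tested code acquires and releases, a plausible no-op stand-in (purely illustrative, the real helper may differ) is:

class MockLock:
    '''Lock replacement that does nothing, for single-threaded tests.'''
    def __enter__(self):
        return self
    def __exit__(self, *exc_info):
        return False
    def acquire(self, timeout=None):
        pass
    def release(self):
        pass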
Example #9
    def run(self):
        log.debug('MetadataDownloadThread: start')
        
        while not self.quit:
            self.event.wait(self.interval)
            self.event.clear()
            
            if self.quit:
                break
            
            with self.bucket_pool() as bucket:
                #XXX: call bucket.is_get_consistent() to verify data consistency later
                seq_no = get_seq_no(bucket)
                if seq_no > self.param['seq_no']:
                    log.info('Remote metadata is newer than local (%d vs %d), '
                             'downloading it', seq_no, self.param['seq_no'])
                elif seq_no < self.param['seq_no']:
                    log.warning('Remote metadata is older than local (%d vs %d), '
                                'ignoring the bucket until the metadata upload '
                                'thread is done', seq_no, self.param['seq_no'])
                    continue
                else:
                    log.info('Remote metadata matches local (%d vs %d), '
                             'skipping download', seq_no, self.param['seq_no'])
                    continue
                              
                log.info("Downloading & uncompressing metadata...")
                os.close(os.open(self.cachepath + '.db.tmp',
                                 os.O_RDWR | os.O_CREAT | os.O_TRUNC,
                                 stat.S_IRUSR | stat.S_IWUSR))

                
                db_conn = Connection(self.cachepath + '.db.tmp', fast_mode=True)
                with bucket.open_read("s3ql_metadata") as fh:
                    restore_metadata(fh, db_conn)
                db_conn.close()

                with llfuse.lock:
                    if self.quit:
                        break
                    os.rename(self.cachepath + '.db.tmp', self.cachepath + '.db')
                    self.db_mtime = os.stat(self.cachepath + '.db').st_mtime 
                    self.param['seq_no'] = seq_no

        log.debug('MetadataDownloadThread: end')    
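The loop exits once self.quit is set and the event fires; the corresponding shutdown helper is not shown, but given only the attributes used above it would plausibly be:

    def stop(self):
        '''Ask the download thread to exit and wake it immediately.'''
        self.quit = True
        self.event.set()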
Example #10
    def setUp(self):
        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)

        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)
        self.cache = inode_cache.InodeCache(self.db, 0)
Example #11
    def setUp(self):
        self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        plain_backend = local.Backend('local://' + self.backend_dir, None,
                                      None)
        self.backend_pool = BackendPool(
            lambda: ComprencBackend(b'schwubl', ('zlib', 6), plain_backend))
        self.backend = self.backend_pool.pop_conn()
        self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)

        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()

        cache = BlockCache(self.backend_pool, self.db,
                           self.cachedir + "/cache", self.max_obj_size * 5)
        self.block_cache = cache
        self.server = fs.Operations(cache, self.db, self.max_obj_size,
                                    InodeCache(self.db, 0))
        self.server.init()

        # Monkeypatch around the need for removal and upload threads
        cache.to_remove = DummyQueue(cache)

        class DummyDistributor:
            def put(self, arg, timeout=None):
                cache._do_upload(*arg)
                return True

        cache.to_upload = DummyDistributor()

        # Keep track of unused filenames
        self.name_cnt = 0
Example #12
def ctx():
    ctx = Namespace()
    ctx.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
    plain_backend = local.Backend(Namespace(
        storage_url='local://' + ctx.backend_dir))
    ctx.backend_pool = BackendPool(lambda: ComprencBackend(b'schwubl', ('zlib', 6),
                                                          plain_backend))
    ctx.backend = ctx.backend_pool.pop_conn()
    ctx.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
    ctx.max_obj_size = 1024

    # Destructors are not guaranteed to run, and we can't unlink
    # the file immediately because apsw refers to it by name.
    # Therefore, we unlink the file manually in tearDown()
    ctx.dbfile = tempfile.NamedTemporaryFile(delete=False)

    ctx.db = Connection(ctx.dbfile.name)
    create_tables(ctx.db)
    init_tables(ctx.db)

    # Tested methods assume that they are called from
    # file system request handler
    llfuse.lock.acquire()

    cache = BlockCache(ctx.backend_pool, ctx.db, ctx.cachedir + "/cache",
                       ctx.max_obj_size * 5)
    ctx.block_cache = cache
    ctx.server = fs.Operations(cache, ctx.db, ctx.max_obj_size,
                                InodeCache(ctx.db, 0))
    ctx.server.init()

    # Monkeypatch around the need for removal and upload threads
    cache.to_remove = DummyQueue(cache)

    class DummyDistributor:
        def put(self, arg, timeout=None):
            cache._do_upload(*arg)
            return True
    cache.to_upload = DummyDistributor()

    # Keep track of unused filenames
    ctx.name_cnt = 0

    yield ctx

    ctx.server.inodes.destroy()
    llfuse.lock.release()
    ctx.block_cache.destroy()
    shutil.rmtree(ctx.cachedir)
    shutil.rmtree(ctx.backend_dir)
    os.unlink(ctx.dbfile.name)
    ctx.dbfile.close()
Example #13
async def ctx():
    ctx = Namespace()
    ctx.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
    ctx.backend_pool = BackendPool(lambda: local.Backend(
        Namespace(storage_url='local://' + ctx.backend_dir)))

    ctx.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
    ctx.max_obj_size = 1024

    # Destructors are not guaranteed to run, and we can't unlink
    # the file immediately because apsw refers to it by name.
    # Therefore, we unlink the file manually in tearDown()
    ctx.dbfile = tempfile.NamedTemporaryFile(delete=False)
    ctx.db = Connection(ctx.dbfile.name)
    create_tables(ctx.db)
    init_tables(ctx.db)

    # Create an inode we can work with
    ctx.inode = 42
    now_ns = time_ns()
    ctx.db.execute(
        "INSERT INTO inodes (id,mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
        "VALUES (?,?,?,?,?,?,?,?,?)",
        (ctx.inode, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
         | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
         os.getuid(), os.getgid(), now_ns, now_ns, now_ns, 1, 32))

    cache = BlockCache(ctx.backend_pool, ctx.db, ctx.cachedir + "/cache",
                       ctx.max_obj_size * 100)
    cache.portal = trio.BlockingTrioPortal()
    ctx.cache = cache

    # Monkeypatch around the need for removal and upload threads
    cache.to_remove = DummyQueue(cache)

    class DummyChannel:
        async def send(self, arg):
            await trio.run_sync_in_worker_thread(cache._do_upload, *arg)

    cache.to_upload = (DummyChannel(), None)

    try:
        yield ctx
    finally:
        ctx.cache.backend_pool = ctx.backend_pool
        if ctx.cache.destroy is not None:
            await ctx.cache.destroy()
        shutil.rmtree(ctx.cachedir)
        shutil.rmtree(ctx.backend_dir)
        ctx.dbfile.close()
        os.unlink(ctx.dbfile.name)
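A compatibility note: BlockingTrioPortal and run_sync_in_worker_thread come from older trio releases. On trio >= 0.12, DummyChannel would plausibly be written as follows (untested against this code base):

    class DummyChannel:
        async def send(self, arg):
            # trio.to_thread.run_sync replaces trio.run_sync_in_worker_thread;
            # trio.from_thread.run_sync similarly replaces BlockingTrioPortal
            # for calling back into trio from a worker thread.
            await trio.to_thread.run_sync(cache._do_upload, *arg)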
Example #14
    def setUp(self):
        self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        self.backend = local.Backend('local://' + self.backend_dir, None, None)
        self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.max_obj_size = 1024

        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        self.fsck = Fsck(self.cachedir, self.backend,
                         {'max_obj_size': self.max_obj_size}, self.db)
        self.fsck.expect_errors = True
Example #15
    def setUp(self):
        self.bucket_dir = tempfile.mkdtemp()
        self.bucket = local.Bucket(self.bucket_dir, None, None)
        self.cachedir = tempfile.mkdtemp() + "/"
        self.blocksize = 1024

        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        self.fsck = Fsck(self.cachedir, self.bucket,
                         {'blocksize': self.blocksize}, self.db)
        self.fsck.expect_errors = True
Example #16
async def ctx():
    ctx = Namespace()
    ctx.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
    plain_backend = local.Backend(
        Namespace(storage_url='local://' + ctx.backend_dir))
    ctx.backend_pool = BackendPool(
        lambda: ComprencBackend(b'schwubl', ('zlib', 6), plain_backend))
    ctx.backend = ctx.backend_pool.pop_conn()
    ctx.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
    ctx.max_obj_size = 1024

    # Destructors are not guaranteed to run, and we can't unlink
    # the file immediately because apsw refers to it by name.
    # Therefore, we unlink the file manually in tearDown()
    ctx.dbfile = tempfile.NamedTemporaryFile(delete=False)

    ctx.db = Connection(ctx.dbfile.name)
    create_tables(ctx.db)
    init_tables(ctx.db)

    cache = BlockCache(ctx.backend_pool, ctx.db, ctx.cachedir + "/cache",
                       ctx.max_obj_size * 5)
    cache.portal = trio.BlockingTrioPortal()
    ctx.cache = cache
    ctx.server = fs.Operations(cache, ctx.db, ctx.max_obj_size,
                               InodeCache(ctx.db, 0))
    ctx.server.init()

    # Monkeypatch around the need for removal and upload threads
    cache.to_remove = DummyQueue(cache)

    class DummyChannel:
        async def send(self, arg):
            await trio.run_sync_in_worker_thread(cache._do_upload, *arg)

    cache.to_upload = (DummyChannel(), None)

    # Keep track of unused filenames
    ctx.name_cnt = 0

    yield ctx

    ctx.server.inodes.destroy()
    await ctx.cache.destroy()
    shutil.rmtree(ctx.cachedir)
    shutil.rmtree(ctx.backend_dir)
    os.unlink(ctx.dbfile.name)
    ctx.dbfile.close()
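Note that, unlike Example #13, the cleanup after the yield here only runs when the test body returns normally; a more defensive variant wraps it the same way that example does:

    try:
        yield ctx
    finally:
        ctx.server.inodes.destroy()
        await ctx.cache.destroy()
        shutil.rmtree(ctx.cachedir)
        shutil.rmtree(ctx.backend_dir)
        os.unlink(ctx.dbfile.name)
        ctx.dbfile.close()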
Example #17
    def setUp(self):
        self.backend_dir = tempfile.mkdtemp()
        self.backend = local.Backend('local://' + self.backend_dir, None, None)
        self.cachedir = tempfile.mkdtemp()
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name. 
        # Therefore, we unlink the file manually in tearDown() 
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        self.fsck = Fsck(self.cachedir, self.backend,
                         {'max_obj_size': self.max_obj_size}, self.db)
        self.fsck.expect_errors = True
Example #18
    def test(self):
        skip_without_rsync()

        ref_dir = tempfile.mkdtemp(prefix='s3ql-ref-')
        try:
            self.populate_dir(ref_dir)

            # Copy source data
            self.mkfs()

            # Force 64bit inodes
            cachepath = os.path.join(self.cache_dir, _escape(self.storage_url))
            db = Connection(cachepath + '.db')
            db.execute('UPDATE sqlite_sequence SET seq=? WHERE name=?',
                       (2**36 + 10, 'inodes'))
            db.close()

            self.mount()
            subprocess.check_call(
                ['rsync', '-aHAX', ref_dir + '/', self.mnt_dir + '/'])
            self.umount()
            self.fsck()

            # Delete cache, run fsck and compare
            shutil.rmtree(self.cache_dir)
            self.cache_dir = tempfile.mkdtemp(prefix='s3ql-cache-')
            self.fsck()
            self.mount()
            try:
                out = check_output(
                    ['rsync', '-anciHAX', '--delete', '--exclude',
                     '/lost+found', ref_dir + '/', self.mnt_dir + '/'],
                    universal_newlines=True, stderr=subprocess.STDOUT)
            except CalledProcessError as exc:
                pytest.fail('rsync failed with ' + exc.output)
            if out:
                pytest.fail('Copy not equal to original, rsync says:\n' + out)

            self.umount()

            # Delete cache and mount
            shutil.rmtree(self.cache_dir)
            self.cache_dir = tempfile.mkdtemp(prefix='s3ql-cache-')
            self.mount()
            self.umount()

        finally:
            shutil.rmtree(ref_dir)
Example #19
    def setUp(self):

        self.bucket_dir = tempfile.mkdtemp()
        self.bucket_pool = BucketPool(lambda: local.Bucket(self.bucket_dir, None, None))

        self.cachedir = tempfile.mkdtemp() + "/"
        self.blocksize = 1024

        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Create an inode we can work with
        self.inode = 42
        self.db.execute(
            "INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount,size) " "VALUES (?,?,?,?,?,?,?,?,?)",
            (
                self.inode,
                stat.S_IFREG
                | stat.S_IRUSR
                | stat.S_IWUSR
                | stat.S_IXUSR
                | stat.S_IRGRP
                | stat.S_IXGRP
                | stat.S_IROTH
                | stat.S_IXOTH,
                os.getuid(),
                os.getgid(),
                time.time(),
                time.time(),
                time.time(),
                1,
                32,
            ),
        )

        self.cache = BlockCache(self.bucket_pool, self.db, self.cachedir, self.blocksize * 100)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()
Example #20
    def setUp(self):

        self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        self.backend_pool = BackendPool(lambda: local.Backend('local://' + self.backend_dir,
                                                              None, None))

        self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Create an inode we can work with
        self.inode = 42
        now_ns = time_ns()
        self.db.execute("INSERT INTO inodes (id,mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
                        "VALUES (?,?,?,?,?,?,?,?,?)",
                        (self.inode, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                         | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
                         os.getuid(), os.getgid(), now_ns, now_ns, now_ns, 1, 32))

        cache = BlockCache(self.backend_pool, self.db, self.cachedir + "/cache",
                           self.max_obj_size * 100)
        self.cache = cache

        # Monkeypatch around the need for removal and upload threads
        cache.to_remove = DummyQueue(cache)

        class DummyDistributor:
            def put(self, arg, timeout=None):
                cache._do_upload(*arg)
                return True
        cache.to_upload = DummyDistributor()

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()
Example #21
    def setUp(self):
        self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        plain_backend = local.Backend('local://' + self.backend_dir, None, None)
        self.backend_pool = BackendPool(lambda: ComprencBackend(b'schwubl', ('zlib', 6),
                                                              plain_backend))
        self.backend = self.backend_pool.pop_conn()
        self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)

        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()

        cache = BlockCache(self.backend_pool, self.db, self.cachedir + "/cache",
                           self.max_obj_size * 5)
        self.block_cache = cache
        self.server = fs.Operations(cache, self.db, self.max_obj_size,
                                    InodeCache(self.db, 0))
        self.server.init()

        # Monkeypatch around the need for removal and upload threads
        cache.to_remove = DummyQueue(cache)

        class DummyDistributor:
            def put(self, arg, timeout=None):
                cache._do_upload(*arg)
                return True
        cache.to_upload = DummyDistributor()

        # Keep track of unused filenames
        self.name_cnt = 0
Example #22
    def setUp(self):
        self.bucket_dir = tempfile.mkdtemp()
        self.bucket_pool = BucketPool(lambda: local.Bucket(self.bucket_dir, None, None))
        self.bucket = self.bucket_pool.pop_conn()
        self.cachedir = tempfile.mkdtemp() + "/"
        self.blocksize = 1024

        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()
        
        self.block_cache = BlockCache(self.bucket_pool, self.db, self.cachedir,
                                      self.blocksize * 5)
        self.server = fs.Operations(self.block_cache, self.db, self.blocksize)
          
        self.server.init()

        # Keep track of unused filenames
        self.name_cnt = 0
Example #23
class fs_api_tests(unittest.TestCase):

    def setUp(self):
        self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        plain_backend = local.Backend('local://' + self.backend_dir, None, None)
        self.backend_pool = BackendPool(lambda: ComprencBackend(b'schwubl', ('zlib', 6),
                                                              plain_backend))
        self.backend = self.backend_pool.pop_conn()
        self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)

        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()

        cache = BlockCache(self.backend_pool, self.db, self.cachedir + "/cache",
                           self.max_obj_size * 5)
        self.block_cache = cache
        self.server = fs.Operations(cache, self.db, self.max_obj_size,
                                    InodeCache(self.db, 0))
        self.server.init()

        # Monkeypatch around the need for removal and upload threads
        cache.to_remove = DummyQueue(cache)

        class DummyDistributor:
            def put(self, arg, timeout=None):
                cache._do_upload(*arg)
                return True
        cache.to_upload = DummyDistributor()

        # Keep track of unused filenames
        self.name_cnt = 0

    def tearDown(self):
        self.server.inodes.destroy()
        llfuse.lock.release()
        self.block_cache.destroy()
        shutil.rmtree(self.cachedir)
        shutil.rmtree(self.backend_dir)
        os.unlink(self.dbfile.name)
        self.dbfile.close()

    @staticmethod
    def random_data(len_):
        with open("/dev/urandom", "rb") as fd:
            return fd.read(len_)

    def fsck(self):
        self.block_cache.clear()
        self.server.inodes.flush()
        fsck = Fsck(self.cachedir + '/cache', self.backend,
                    {'max_obj_size': self.max_obj_size}, self.db)
        fsck.check()
        self.assertFalse(fsck.found_errors)

    def newname(self):
        self.name_cnt += 1
        return ("s3ql_%d" % self.name_cnt).encode()

    def test_getattr_root(self):
        self.assertTrue(stat.S_ISDIR(self.server.getattr(ROOT_INODE).mode))
        self.fsck()

    def test_create(self):
        ctx = Ctx()
        mode = self.dir_mode()
        name = self.newname()

        inode_p_old = self.server.getattr(ROOT_INODE).copy()
        safe_sleep(CLOCK_GRANULARITY)
        self.server._create(ROOT_INODE, name, mode, ctx)

        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON name_id = names.id '
                              'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE))

        inode = self.server.getattr(id_)

        self.assertEqual(inode.mode, mode)
        self.assertEqual(inode.uid, ctx.uid)
        self.assertEqual(inode.gid, ctx.gid)
        self.assertEqual(inode.refcount, 1)
        self.assertEqual(inode.size, 0)

        inode_p_new = self.server.getattr(ROOT_INODE)

        self.assertGreater(inode_p_new.mtime, inode_p_old.mtime)
        self.assertGreater(inode_p_new.ctime, inode_p_old.ctime)

        self.server.forget([(id_, 1)])
        self.fsck()

    def test_extstat(self):
        # Test with zero contents
        self.server.extstat()

        # Test with empty file
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)
        self.server.extstat()

        # Test with data in file
        fh = self.server.open(inode.id, os.O_RDWR)
        self.server.write(fh, 0, b'foobar')
        self.server.release(fh)

        self.server.extstat()
        self.server.forget([(inode.id, 1)])
        self.fsck()

    @staticmethod
    def dir_mode():
        return (randint(0, 0o7777) & ~stat.S_IFDIR) | stat.S_IFDIR

    @staticmethod
    def file_mode():
        return (randint(0, 0o7777) & ~stat.S_IFREG) | stat.S_IFREG

    def test_getxattr(self):
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)

        self.assertRaises(FUSEError, self.server.getxattr, inode.id, b'nonexistent-attr')

        self.server.setxattr(inode.id, b'my-attr', b'strabumm!')
        self.assertEqual(self.server.getxattr(inode.id, b'my-attr'), b'strabumm!')

        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_link(self):
        name = self.newname()

        inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(),
                                        self.dir_mode(), Ctx())
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()

        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)
        safe_sleep(CLOCK_GRANULARITY)

        inode_before = self.server.getattr(inode.id).copy()
        self.server.link(inode.id, inode_p_new.id, name)

        inode_after = self.server.lookup(inode_p_new.id, name)
        inode_p_new_after = self.server.getattr(inode_p_new.id)

        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                              'WHERE name=? AND parent_inode = ?', (name, inode_p_new.id))

        self.assertEqual(inode_before.id, id_)
        self.assertEqual(inode_after.refcount, 2)
        self.assertGreater(inode_after.ctime, inode_before.ctime)
        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
        self.server.forget([(inode.id, 1), (inode_p_new.id, 1), (inode_after.id, 1)])
        self.fsck()

    def test_listxattr(self):
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)

        self.assertListEqual([], self.server.listxattr(inode.id))

        self.server.setxattr(inode.id, b'key1', b'blub')
        self.assertListEqual([b'key1'], self.server.listxattr(inode.id))

        self.server.setxattr(inode.id, b'key2', b'blub')
        self.assertListEqual(sorted([b'key1', b'key2']),
                             sorted(self.server.listxattr(inode.id)))
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_read(self):

        len_ = self.max_obj_size
        data = self.random_data(len_)
        off = self.max_obj_size // 2
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                     self.file_mode(), os.O_RDWR, Ctx())

        self.server.write(fh, off, data)
        inode_before = self.server.getattr(inode.id).copy()
        safe_sleep(CLOCK_GRANULARITY)
        self.assertTrue(self.server.read(fh, off, len_) == data)
        inode_after = self.server.getattr(inode.id)
        self.assertGreater(inode_after.atime, inode_before.atime)
        self.assertTrue(self.server.read(fh, 0, len_) == b"\0" * off + data[:off])
        self.assertTrue(self.server.read(fh, self.max_obj_size, len_) == data[off:])
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_readdir(self):

        # Create a few entries
        names = [ ('entry_%2d' % i).encode() for i in range(20) ]
        for name in names:
            (fh, inode) = self.server.create(ROOT_INODE, name,
                                         self.file_mode(), os.O_RDWR, Ctx())
            self.server.release(fh)
            self.server.forget([(inode.id, 1)])

        # Delete some to make sure that we don't have continuous rowids
        remove_no = [0, 2, 3, 5, 9]
        for i in remove_no:
            self.server.unlink(ROOT_INODE, names[i])
            del names[i]

        # Read all
        fh = self.server.opendir(ROOT_INODE)
        self.assertListEqual(sorted(names + [b'lost+found']),
                             sorted(x[0] for x in self.server.readdir(fh, 0)))
        self.server.releasedir(fh)

        # Read in parts
        fh = self.server.opendir(ROOT_INODE)
        entries = list()
        try:
            next_ = 0
            while True:
                gen = self.server.readdir(fh, next_)
                for _ in range(3):
                    (name, _, next_) = next(gen)
                    entries.append(name)

        except StopIteration:
            pass

        self.assertListEqual(sorted(names + [b'lost+found']),
                             sorted(entries))
        self.server.releasedir(fh)

        self.fsck()

    def test_forget(self):
        name = self.newname()

        # Test that entries are deleted when they're no longer referenced
        (fh, inode) = self.server.create(ROOT_INODE, name,
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'foobar')
        self.server.unlink(ROOT_INODE, name)
        self.assertFalse(self.db.has_val('SELECT 1 FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        self.assertTrue(self.server.getattr(inode.id).id)
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])

        self.assertFalse(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,)))

        self.fsck()

    def test_removexattr(self):
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)

        self.assertRaises(FUSEError, self.server.removexattr, inode.id, b'some name')
        self.server.setxattr(inode.id, b'key1', b'blub')
        self.server.removexattr(inode.id, b'key1')
        self.assertListEqual([], self.server.listxattr(inode.id))
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_rename(self):
        oldname = self.newname()
        newname = self.newname()

        inode = self.server.mkdir(ROOT_INODE, oldname, self.dir_mode(), Ctx())

        inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), Ctx())
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
        inode_p_old_before = self.server.getattr(ROOT_INODE).copy()
        safe_sleep(CLOCK_GRANULARITY)

        self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)

        inode_p_old_after = self.server.getattr(ROOT_INODE)
        inode_p_new_after = self.server.getattr(inode_p_new.id)

        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE)))
        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                              'WHERE name=? AND parent_inode = ?', (newname, inode_p_new.id))
        self.assertEqual(inode.id, id_)

        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
        self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
        self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)

        self.server.forget([(inode.id, 1), (inode_p_new.id, 1)])
        self.fsck()

    def test_replace_file(self):
        oldname = self.newname()
        newname = self.newname()

        (fh, inode) = self.server.create(ROOT_INODE, oldname, self.file_mode(),
                                         os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'some data to deal with')
        self.server.release(fh)
        self.server.setxattr(inode.id, b'test_xattr', b'42*8')

        inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), Ctx())
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
        inode_p_old_before = self.server.getattr(ROOT_INODE).copy()

        (fh, inode2) = self.server.create(inode_p_new.id, newname, self.file_mode(),
                                          os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'even more data to deal with')
        self.server.release(fh)
        self.server.setxattr(inode2.id, b'test_xattr', b'42*8')
        self.server.forget([(inode2.id, 1)])

        safe_sleep(CLOCK_GRANULARITY)
        self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)

        inode_p_old_after = self.server.getattr(ROOT_INODE)
        inode_p_new_after = self.server.getattr(inode_p_new.id)

        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE)))
        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                              'WHERE name=? AND parent_inode = ?', (newname, inode_p_new.id))
        self.assertEqual(inode.id, id_)

        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
        self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
        self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)

        self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode2.id,)))
        self.server.forget([(inode.id, 1), (inode_p_new.id, 1)])
        self.fsck()

    def test_replace_dir(self):
        oldname = self.newname()
        newname = self.newname()

        inode = self.server.mkdir(ROOT_INODE, oldname, self.dir_mode(), Ctx())

        inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), Ctx())
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
        inode_p_old_before = self.server.getattr(ROOT_INODE).copy()

        inode2 = self.server.mkdir(inode_p_new.id, newname, self.dir_mode(), Ctx())
        self.server.forget([(inode2.id, 1)])

        safe_sleep(CLOCK_GRANULARITY)
        self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)

        inode_p_old_after = self.server.getattr(ROOT_INODE)
        inode_p_new_after = self.server.getattr(inode_p_new.id)

        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE)))
        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                              'WHERE name=? AND parent_inode = ?', (newname, inode_p_new.id))
        self.assertEqual(inode.id, id_)

        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
        self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
        self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)

        self.server.forget([(inode.id, 1), (inode_p_new.id, 1)])
        self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode2.id,)))
        self.fsck()

    def test_setattr(self):
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(), 0o641,
                                         os.O_RDWR, Ctx())
        self.server.release(fh)
        inode_old = self.server.getattr(inode.id).copy()

        attr = llfuse.EntryAttributes()
        attr.st_mode = self.file_mode()
        attr.st_uid = randint(0, 2 ** 32)
        attr.st_gid = None
        attr.st_atime = randint(0, 2 ** 32) / 10 ** 6
        attr.st_mtime = randint(0, 2 ** 32) / 10 ** 6

        safe_sleep(CLOCK_GRANULARITY)
        self.server.setattr(inode.id, attr)
        inode_new = self.server.getattr(inode.id)
        self.assertGreater(inode_new.ctime, inode_old.ctime)

        for key in attr.__slots__:
            if key in ('st_mode', 'st_uid', 'st_atime', 'st_mtime'):
                self.assertEqual(getattr(attr, key),
                                 getattr(inode_new, key), '%s mismatch' % key)
            elif key != 'st_ctime':
                self.assertEqual(getattr(inode_old, key),
                                 getattr(inode_new, key), '%s mismatch' % key)

        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_truncate(self):
        len_ = int(2.7 * self.max_obj_size)
        data = self.random_data(len_)
        attr = llfuse.EntryAttributes()

        (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(),
                                         os.O_RDWR, Ctx())
        self.server.write(fh, 0, data)

        attr.st_size = len_ // 2
        self.server.setattr(inode.id, attr)
        self.assertTrue(self.server.read(fh, 0, len_) == data[:len_ // 2])
        attr.st_size = len_
        self.server.setattr(inode.id, attr)
        self.assertTrue(self.server.read(fh, 0, len_)
                        == data[:len_ // 2] + b'\0' * (len_ // 2))
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_truncate_0(self):
        len1 = 158
        len2 = 133
        attr = llfuse.EntryAttributes()

        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, self.random_data(len1))
        self.server.release(fh)
        self.server.inodes.flush()

        fh = self.server.open(inode.id, os.O_RDWR)
        attr.st_size = 0
        self.server.setattr(inode.id, attr)
        self.server.write(fh, 0, self.random_data(len2))
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_setxattr(self):
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)

        self.server.setxattr(inode.id, b'my-attr', b'strabumm!')
        self.assertEqual(self.server.getxattr(inode.id, b'my-attr'), b'strabumm!')
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_names(self):
        name1 = self.newname()
        name2 = self.newname()

        (fh, inode) = self.server.create(ROOT_INODE, name1, self.file_mode(),
                                     os.O_RDWR, Ctx())
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])

        (fh, inode) = self.server.create(ROOT_INODE, name2, self.file_mode(),
                                         os.O_RDWR, Ctx())
        self.server.release(fh)

        self.server.setxattr(inode.id, name1, b'strabumm!')
        self.fsck()

        self.server.removexattr(inode.id, name1)
        self.fsck()

        self.server.setxattr(inode.id, name1, b'strabumm karacho!!')
        self.server.unlink(ROOT_INODE, name1)
        self.server.forget([(inode.id, 1)])
        self.fsck()


    def test_statfs(self):
        # Test with zero contents
        self.server.statfs()

        # Test with empty file
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)
        self.server.statfs()

        # Test with data in file
        fh = self.server.open(inode.id, os.O_RDWR)
        self.server.write(fh, 0, b'foobar')
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.server.statfs()

    def test_symlink(self):
        target = self.newname()
        name = self.newname()

        inode_p_before = self.server.getattr(ROOT_INODE).copy()
        safe_sleep(CLOCK_GRANULARITY)
        inode = self.server.symlink(ROOT_INODE, name, target, Ctx())
        inode_p_after = self.server.getattr(ROOT_INODE)

        self.assertEqual(target, self.server.readlink(inode.id))

        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                              'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE))

        self.assertEqual(inode.id, id_)
        self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
        self.assertLess(inode_p_before.ctime, inode_p_after.ctime)

        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_unlink(self):
        name = self.newname()

        (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'some data to deal with')
        self.server.release(fh)

        # Add extended attributes
        self.server.setxattr(inode.id, b'test_xattr', b'42*8')
        self.server.forget([(inode.id, 1)])

        inode_p_before = self.server.getattr(ROOT_INODE).copy()
        safe_sleep(CLOCK_GRANULARITY)
        self.server.unlink(ROOT_INODE, name)
        inode_p_after = self.server.getattr(ROOT_INODE)

        self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
        self.assertLess(inode_p_before.ctime, inode_p_after.ctime)

        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id,)))

        self.fsck()

    def test_rmdir(self):
        name = self.newname()
        inode = self.server.mkdir(ROOT_INODE, name, self.dir_mode(), Ctx())
        self.server.forget([(inode.id, 1)])
        inode_p_before = self.server.getattr(ROOT_INODE).copy()
        safe_sleep(CLOCK_GRANULARITY)
        self.server.rmdir(ROOT_INODE, name)
        inode_p_after = self.server.getattr(ROOT_INODE)

        self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
        self.assertLess(inode_p_before.ctime, inode_p_after.ctime)
        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id,)))

        self.fsck()

    def test_relink(self):
        name = self.newname()
        name2 = self.newname()
        data = b'some data to deal with'

        (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, data)
        self.server.release(fh)
        self.server.unlink(ROOT_INODE, name)
        self.server.inodes.flush()
        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        self.assertTrue(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id,)))
        self.server.link(inode.id, ROOT_INODE, name2)
        self.server.forget([(inode.id, 2)])

        inode = self.server.lookup(ROOT_INODE, name2)
        fh = self.server.open(inode.id, os.O_RDONLY)
        self.assertTrue(self.server.read(fh, 0, len(data)) == data)
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_write(self):
        len_ = self.max_obj_size
        data = self.random_data(len_)
        off = self.max_obj_size // 2
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                     self.file_mode(), os.O_RDWR, Ctx())
        inode_before = self.server.getattr(inode.id).copy()
        safe_sleep(CLOCK_GRANULARITY)
        self.server.write(fh, off, data)
        inode_after = self.server.getattr(inode.id)

        self.assertGreater(inode_after.mtime, inode_before.mtime)
        self.assertGreater(inode_after.ctime, inode_before.ctime)
        self.assertEqual(inode_after.size, off + len_)

        self.server.write(fh, 0, data)
        inode_after = self.server.getattr(inode.id)
        self.assertEqual(inode_after.size, off + len_)

        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_failsafe(self):
        len_ = self.max_obj_size
        data = self.random_data(len_)
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                     self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, data)
        self.server.cache.clear()
        self.assertFalse(self.server.failsafe)

        datafile = os.path.join(self.backend_dir, 's3ql_data_', 's3ql_data_1')
        shutil.copy(datafile, datafile + '.bak')

        # Modify contents
        with open(datafile, 'rb+') as rfh:
            rfh.seek(560)
            rfh.write(b'blrub!')
        with self.assertRaises(FUSEError) as cm:
            with catch_logmsg('^Backend returned malformed data for',
                              count=1, level=logging.ERROR):
                self.server.read(fh, 0, len_)
        self.assertEqual(cm.exception.errno, errno.EIO)
        self.assertTrue(self.server.failsafe)

        # Restore the contents; the block should still be marked as damaged
        os.rename(datafile + '.bak', datafile)
        with self.assertRaises(FUSEError) as cm:
            self.server.read(fh, 0, len_)
        self.assertEqual(cm.exception.errno, errno.EIO)

        # Release and re-open, now we should be able to access again
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])

        # ..but not write access since we are in failsafe mode
        with self.assertRaises(FUSEError) as cm:
            self.server.open(inode.id, os.O_RDWR)
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # ..read-only is fine.
        fh = self.server.open(inode.id, os.O_RDONLY)
        self.server.read(fh, 0, len_)

        # Remove completely, should give error after cache flush
        os.unlink(datafile)
        self.server.read(fh, 3, len_//2)
        self.server.cache.clear()
        with self.assertRaises(FUSEError) as cm:
            with catch_logmsg('^Backend lost block',
                              count=1, level=logging.ERROR):
                self.server.read(fh, 5, len_//2)
        self.assertEqual(cm.exception.errno, errno.EIO)


        # Don't call fsck, we're missing a block

    def test_create_open(self):
        name = self.newname()
        # Create a new file
        (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(),
                                         os.O_RDWR, Ctx())
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])

        # Open it atomically
        (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(),
                                         os.O_RDWR, Ctx())
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])

        self.fsck()

    def test_edit(self):
        len_ = self.max_obj_size
        data = self.random_data(len_)
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, data)
        self.server.release(fh)

        self.block_cache.clear()

        fh = self.server.open(inode.id, os.O_RDWR)
        attr = llfuse.EntryAttributes()
        attr.st_size = 0
        self.server.setattr(inode.id, attr)
        self.server.write(fh, 0, data[50:])
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_copy_tree(self):
        ext_attr_name = b'system.foo.brazl'
        ext_attr_val = b'schulla dku woumm bramp'

        src_inode = self.server.mkdir(ROOT_INODE, b'source', self.dir_mode(), Ctx())
        dst_inode = self.server.mkdir(ROOT_INODE, b'dest', self.dir_mode(), Ctx())

        # Create file
        (fh, f1_inode) = self.server.create(src_inode.id, b'file1',
                                            self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'file1 contents')
        self.server.release(fh)
        self.server.setxattr(f1_inode.id, ext_attr_name, ext_attr_val)

        # Create hardlink
        (fh, f2_inode) = self.server.create(src_inode.id, b'file2',
                                            self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'file2 contents')
        self.server.release(fh)
        f2_inode = self.server.link(f2_inode.id, src_inode.id, b'file2_hardlink')

        # Create subdirectory
        d1_inode = self.server.mkdir(src_inode.id, b'dir1', self.dir_mode(), Ctx())
        d2_inode = self.server.mkdir(d1_inode.id, b'dir2', self.dir_mode(), Ctx())

        # ..with a 3rd hardlink
        f2_inode = self.server.link(f2_inode.id, d1_inode.id, b'file2_hardlink')

        # Replicate
        self.server.copy_tree(src_inode.id, dst_inode.id)

        # Change files
        fh = self.server.open(f1_inode.id, os.O_RDWR)
        self.server.write(fh, 0, b'new file1 contents')
        self.server.release(fh)

        fh = self.server.open(f2_inode.id, os.O_RDWR)
        self.server.write(fh, 0, b'new file2 contents')
        self.server.release(fh)

        # Get copy properties
        f1_inode_c = self.server.lookup(dst_inode.id, b'file1')
        f2_inode_c = self.server.lookup(dst_inode.id, b'file2')
        f2h_inode_c = self.server.lookup(dst_inode.id, b'file2_hardlink')
        d1_inode_c = self.server.lookup(dst_inode.id, b'dir1')
        d2_inode_c = self.server.lookup(d1_inode_c.id, b'dir2')
        f2_h_inode_c = self.server.lookup(d1_inode_c.id, b'file2_hardlink')

        # Check file1
        fh = self.server.open(f1_inode_c.id, os.O_RDWR)
        self.assertEqual(self.server.read(fh, 0, 42), b'file1 contents')
        self.server.release(fh)
        self.assertNotEqual(f1_inode.id, f1_inode_c.id)
        self.assertEqual(self.server.getxattr(f1_inode_c.id, ext_attr_name),
                         ext_attr_val)

        # Check file2
        fh = self.server.open(f2_inode_c.id, os.O_RDWR)
        self.assertEqual(self.server.read(fh, 0, 42), b'file2 contents')
        self.server.release(fh)
        self.assertEqual(f2_inode_c.id, f2h_inode_c.id)
        self.assertEqual(f2_inode_c.refcount, 3)
        self.assertNotEqual(f2_inode.id, f2_inode_c.id)
        self.assertEqual(f2_h_inode_c.id, f2_inode_c.id)

        # Check subdir1
        self.assertNotEqual(d1_inode.id, d1_inode_c.id)
        self.assertNotEqual(d2_inode.id, d2_inode_c.id)
        self.server.forget(list(self.server.open_inodes.items()))
        self.fsck()

    def test_copy_tree_2(self):
        src_inode = self.server.mkdir(ROOT_INODE, b'source', self.dir_mode(), Ctx())
        dst_inode = self.server.mkdir(ROOT_INODE, b'dest', self.dir_mode(), Ctx())

        # Create file
        (fh, inode) = self.server.create(src_inode.id, b'file1',
                                     self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'block 1 contents')
        self.server.write(fh, self.max_obj_size, b'block 1 contents')
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])

        self.server.copy_tree(src_inode.id, dst_inode.id)

        self.server.forget([(src_inode.id, 1), (dst_inode.id, 1)])
        self.fsck()

    def test_lock_tree(self):

        inode1 = self.server.mkdir(ROOT_INODE, b'source', self.dir_mode(), Ctx())

        # Create file
        (fh, inode1a) = self.server.create(inode1.id, b'file1',
                                            self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'file1 contents')
        self.server.release(fh)

        # Create subdirectory
        inode2 = self.server.mkdir(inode1.id, b'dir1', self.dir_mode(), Ctx())
        (fh, inode2a) = self.server.create(inode2.id, b'file2',
                                           self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'file2 contents')
        self.server.release(fh)

        # Another file
        (fh, inode3) = self.server.create(ROOT_INODE, b'file1',
                                          self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)

        # Lock
        self.server.lock_tree(inode1.id)

        for i in (inode1.id, inode1a.id, inode2.id, inode2a.id):
            self.assertTrue(self.server.inodes[i].locked)

        # Remove
        with self.assertRaises(FUSEError) as cm:
            self.server._remove(inode1.id, b'file1', inode1a.id)
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # Rename / Replace
        with self.assertRaises(FUSEError) as cm:
            self.server.rename(ROOT_INODE, b'file1', inode1.id, b'file2')
        self.assertEqual(cm.exception.errno, errno.EPERM)
        with self.assertRaises(FUSEError) as cm:
            self.server.rename(inode1.id, b'file1', ROOT_INODE, b'file2')
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # Open
        with self.assertRaises(FUSEError) as cm:
            self.server.open(inode2a.id, os.O_RDWR)
        self.assertEqual(cm.exception.errno, errno.EPERM)
        with self.assertRaises(FUSEError) as cm:
            self.server.open(inode2a.id, os.O_WRONLY)
        self.assertEqual(cm.exception.errno, errno.EPERM)
        self.server.release(self.server.open(inode3.id, os.O_WRONLY))

        # Write
        fh = self.server.open(inode2a.id, os.O_RDONLY)
        with self.assertRaises(FUSEError) as cm:
            self.server.write(fh, 0, b'foo')
        self.assertEqual(cm.exception.errno, errno.EPERM)
        self.server.release(fh)

        # Create
        with self.assertRaises(FUSEError) as cm:
            self.server._create(inode2.id, b'dir1', self.dir_mode(), os.O_RDWR, Ctx())
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # Setattr
        with self.assertRaises(FUSEError) as cm:
            self.server.setattr(inode2a.id, dict())
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # xattr
        with self.assertRaises(FUSEError) as cm:
            self.server.setxattr(inode2.id, b'name', b'value')
        self.assertEqual(cm.exception.errno, errno.EPERM)
        with self.assertRaises(FUSEError) as cm:
            self.server.removexattr(inode2.id, b'name')
        self.assertEqual(cm.exception.errno, errno.EPERM)
        self.server.forget(list(self.server.open_inodes.items()))
        self.fsck()

    def test_remove_tree(self):

        inode1 = self.server.mkdir(ROOT_INODE, b'source', self.dir_mode(), Ctx())

        # Create file
        (fh, inode1a) = self.server.create(inode1.id, b'file1',
                                            self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'file1 contents')
        self.server.release(fh)

        # Create subdirectory
        inode2 = self.server.mkdir(inode1.id, b'dir1', self.dir_mode(), Ctx())
        (fh, inode2a) = self.server.create(inode2.id, b'file2',
                                           self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'file2 contents')
        self.server.release(fh)

        # Remove
        self.server.forget(list(self.server.open_inodes.items()))
        self.server.remove_tree(ROOT_INODE, b'source')

        for (id_p, name) in ((ROOT_INODE, b'source'),
                             (inode1.id, b'file1'),
                             (inode1.id, b'dir1'),
                             (inode2.id, b'file2')):
            self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                             'WHERE name=? AND parent_inode = ?', (name, id_p)))

        for id_ in (inode1.id, inode1a.id, inode2.id, inode2a.id):
            self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (id_,)))

        self.fsck()
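
The SQL assertions closing test_remove_tree pin down the invariant that remove_tree must leave behind neither directory entries nor inode rows. Below is a minimal standalone sketch of the same check using the stdlib sqlite3 module and a deliberately simplified schema (the table and column names only mirror the ones above, they are not the real s3ql schema):

import sqlite3

db = sqlite3.connect(':memory:')
db.execute('CREATE TABLE inodes (id INTEGER PRIMARY KEY)')
db.execute('CREATE TABLE contents (name BLOB, inode INT, parent_inode INT)')

# One file under parent inode 1
db.execute('INSERT INTO inodes (id) VALUES (2)')
db.execute('INSERT INTO contents VALUES (?, 2, 1)', (b'file1',))

# Simulate remove_tree: drop the dentry and the now-unreferenced inode
db.execute('DELETE FROM contents WHERE parent_inode = 1')
db.execute('DELETE FROM inodes WHERE id = 2')

# The invariant asserted above: neither a dentry nor an inode row survives
assert db.execute('SELECT 1 FROM contents WHERE name=? AND parent_inode=?',
                  (b'file1', 1)).fetchone() is None
assert db.execute('SELECT 1 FROM inodes WHERE id=?', (2,)).fetchone() is None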
Example No. 26
    def test(self):
        skip_without_rsync()
        ref_dir = tempfile.mkdtemp(prefix='s3ql-ref-')
        try:
            populate_dir(ref_dir)

            # Make file system and fake high inode number
            self.mkfs()
            db = Connection(
                get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
            db.execute('UPDATE sqlite_sequence SET seq=? WHERE name=?',
                       (2**31 + 10, 'inodes'))
            db.close()

            # Copy source data
            self.mount()
            subprocess.check_call(
                ['rsync', '-aHAX', ref_dir + '/', self.mnt_dir + '/'])
            self.umount()

            # Check that inode watermark is high
            db = Connection(
                get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
            assert db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?',
                              ('inodes', )) > 2**31 + 10
            assert db.get_val('SELECT MAX(id) FROM inodes') > 2**31 + 10
            db.close()

            # Renumber inodes
            self.fsck()

            # Check if renumbering was done
            db = Connection(
                get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
            assert db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?',
                              ('inodes', )) < 2**31
            assert db.get_val('SELECT MAX(id) FROM inodes') < 2**31
            db.close()

            # Compare
            self.mount()
            try:
                out = check_output([
                    'rsync', '-anciHAX', '--delete', '--exclude',
                    '/lost+found', ref_dir + '/', self.mnt_dir + '/'
                ],
                                   universal_newlines=True,
                                   stderr=subprocess.STDOUT)
            except CalledProcessError as exc:
                pytest.fail('rsync failed with ' + exc.output)
            if out:
                pytest.fail('Copy not equal to original, rsync says:\n' + out)

            self.umount()
        finally:
            shutil.rmtree(ref_dir)
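
The trick in this test is that for tables declared INTEGER PRIMARY KEY AUTOINCREMENT, SQLite records the highest id ever allocated in the internal sqlite_sequence table, so overwriting seq pushes every later insert above the chosen watermark. A self-contained illustration with the stdlib sqlite3 module:

import sqlite3

db = sqlite3.connect(':memory:')
db.execute('CREATE TABLE inodes (id INTEGER PRIMARY KEY AUTOINCREMENT)')
db.execute('INSERT INTO inodes DEFAULT VALUES')  # id 1

# Fake a high watermark, as the test does before mounting
db.execute('UPDATE sqlite_sequence SET seq=? WHERE name=?',
           (2**31 + 10, 'inodes'))
db.execute('INSERT INTO inodes DEFAULT VALUES')

# The new row was allocated above the forced watermark
assert db.execute('SELECT MAX(id) FROM inodes').fetchone()[0] == 2**31 + 11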
Example No. 27
    def runTest(self):
        try:
            subprocess.call(['rsync', '--version'],
                            stderr=subprocess.STDOUT,
                            stdout=open('/dev/null', 'wb'))
        except OSError as exc:
            if exc.errno == errno.ENOENT:
                raise unittest.SkipTest('rsync not installed')
            raise

        ref_dir = tempfile.mkdtemp()
        try:
            populate_dir(ref_dir)

            # Make file system and fake high inode number
            self.mkfs()
            db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
            db.execute('UPDATE sqlite_sequence SET seq=? WHERE name=?',
                       (2 ** 31 + 10, u'inodes'))
            db.close()

            # Copy source data
            self.mount()
            subprocess.check_call(['rsync', '-aHAX', ref_dir + '/',
                                   self.mnt_dir + '/'])
            self.umount()

            # Check that inode watermark is high
            db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
            self.assertGreater(db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?', (u'inodes',)), 2 ** 31 + 10)
            self.assertGreater(db.get_val('SELECT MAX(id) FROM inodes'), 2 ** 31 + 10)
            db.close()

            # Renumber inodes
            self.fsck()

            # Check if renumbering was done
            db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
            self.assertLess(db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?', (u'inodes',)), 2 ** 31)
            self.assertLess(db.get_val('SELECT MAX(id) FROM inodes'), 2 ** 31)
            db.close()

            # Compare
            self.mount()
            rsync = subprocess.Popen(['rsync', '-anciHAX', '--delete',
                                      '--exclude', '/lost+found',
                                      ref_dir + '/', self.mnt_dir + '/'],
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT)
            out = rsync.communicate()[0]
            if rsync.returncode != 0:
                self.fail('rsync failed with ' + out)
            elif out:
                self.fail('Copy not equal to original, rsync says:\n' + out)

            self.umount()
        finally:
            shutil.rmtree(ref_dir)
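
The OSError/ENOENT handling at the top of runTest is the Python 2 way of detecting a missing rsync binary; on Python 3.3 and later the same skip can be expressed with shutil.which (a sketch, not part of the original suite):

import shutil
import unittest

def skip_without_rsync():
    # Equivalent to catching ENOENT from the subprocess.call probe above
    if shutil.which('rsync') is None:
        raise unittest.SkipTest('rsync not installed')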
Example No. 28
File: adm.py Project: drewlu/ossql
def upgrade(bucket):
    '''Upgrade file system to newest revision'''

    # Access to protected member
    #pylint: disable=W0212
    log.info('Getting file system parameters..')
    seq_nos = [ int(x[len('s3ql_seq_no_'):]) for x in bucket.list('s3ql_seq_no_') ]
    if not seq_nos:
        raise QuietError(textwrap.dedent('''
            File system revision too old to upgrade!

            You need to use an older S3QL version to upgrade to a more recent
            revision before you can use this version to upgrade to the newest
            revision.
            '''))
    seq_no = max(seq_nos)
    param = bucket.lookup('s3ql_metadata')

    # Check for unclean shutdown
    if param['seq_no'] < seq_no:
        if bucket.is_get_consistent():
            raise QuietError(textwrap.fill(textwrap.dedent('''\
                It appears that the file system is still mounted somewhere else. If this is not
                the case, the file system may not have been unmounted cleanly and you should try
                to run fsck on the computer where the file system has been mounted most recently.
                ''')))
        else:
            raise QuietError(textwrap.fill(textwrap.dedent('''\
                It appears that the file system is still mounted somewhere else. If this is not the
                case, the file system may not have been unmounted cleanly or the data from the
                most-recent mount may not yet have propagated through the backend. In the latter
                case, waiting for a while should fix the problem; in the former case, you should
                try to run fsck on the computer where the file system has been mounted most
                recently.
                ''')))

    # Check that the fs itself is clean
    if param['needs_fsck']:
        raise QuietError("File system damaged, run fsck!")
    
    # Check revision
    if param['revision'] < CURRENT_FS_REV - 1:
        raise QuietError(textwrap.dedent(''' 
            File system revision too old to upgrade!
            
            You need to use an older S3QL version to upgrade to a more recent
            revision before you can use this version to upgrade to the newest
            revision.
            '''))

    elif param['revision'] >= CURRENT_FS_REV:
        print('File system already at most-recent revision')
        return
    
    print(textwrap.dedent('''
        I am about to update the file system to the newest revision. 
        You will not be able to access the file system with any older version
        of S3QL after this operation. 
        
        You should make very sure that this command is not interrupted and
        that no one else tries to mount, fsck or upgrade the file system at
        the same time.
        '''))

    print('Please enter "yes" to continue.', '> ', sep='\n', end='')

    if sys.stdin.readline().strip().lower() != 'yes':
        raise QuietError()

    log.info('Upgrading from revision %d to %d...', CURRENT_FS_REV - 1,
             CURRENT_FS_REV)
    
    
    if 's3ql_hash_check_status' not in bucket:        
        if (isinstance(bucket, LegacyLocalBucket) or
            (isinstance(bucket, BetterBucket) and
             isinstance(bucket.bucket, LegacyLocalBucket))):
            if isinstance(bucket, LegacyLocalBucket):
                bucketpath = bucket.name
            else:
                bucketpath = bucket.bucket.name
            for (path, _, filenames) in os.walk(bucketpath, topdown=True):
                for name in filenames:
                    if not name.endswith('.meta'):
                        continue
                    
                    basename = os.path.splitext(name)[0]
                    if '=00' in basename:
                        raise RuntimeError("No, seriously, you tried to break things, didn't you?")
                    
                    with open(os.path.join(path, name), 'r+b') as dst:
                        dst.seek(0, os.SEEK_END)
                        with open(os.path.join(path, basename + '.dat'), 'rb') as src:
                            shutil.copyfileobj(src, dst)
                    
                    basename = basename.replace('#', '=23')
                    os.rename(os.path.join(path, name),
                              os.path.join(path, basename))
                    os.unlink(os.path.join(path, basename + '.dat'))
                    
            if isinstance(bucket, LegacyLocalBucket):
                bucket = LocalBucket(bucket.name, None, None)
            else:
                bucket.bucket = LocalBucket(bucket.bucket.name, None, None)
                         
        # Download metadata
        log.info("Downloading & uncompressing metadata...")
        dbfile = tempfile.NamedTemporaryFile()
        with tempfile.TemporaryFile() as tmp:    
            with bucket.open_read("s3ql_metadata") as fh:
                shutil.copyfileobj(fh, tmp)
        
            db = Connection(dbfile.name, fast_mode=True)
            tmp.seek(0)
            restore_legacy_metadata(tmp, db)
    
        # Increase metadata sequence no
        param['seq_no'] += 1
        bucket['s3ql_seq_no_%d' % param['seq_no']] = 'Empty'
        for i in seq_nos:
            if i < param['seq_no'] - 5:
                del bucket['s3ql_seq_no_%d' % i ]
    
        log.info("Uploading database..")
        cycle_metadata(bucket)
        param['last-modified'] = time.time() - time.timezone
        with bucket.open_write("s3ql_metadata", param) as fh:
            dump_metadata(fh, db)
            
    else:
        log.info("Downloading & uncompressing metadata...")
        dbfile = tempfile.NamedTemporaryFile()
        with tempfile.TemporaryFile() as tmp:    
            with bucket.open_read("s3ql_metadata") as fh:
                shutil.copyfileobj(fh, tmp)
        
            db = Connection(dbfile.name, fast_mode=True)
            tmp.seek(0)
            restore_metadata(tmp, db)        

    print(textwrap.dedent('''
        The following process may take a long time, but can be interrupted
        with Ctrl-C and resumed from this point by calling `s3qladm upgrade`
        again. Please see Changes.txt for why this is necessary.
        '''))

    if 's3ql_hash_check_status' not in bucket:
        log.info("Starting hash verification..")
        start_obj = 0
    else:
        start_obj = int(bucket['s3ql_hash_check_status'])
        log.info("Resuming hash verification with object %d..", start_obj)
    
    try:
        total = db.get_val('SELECT COUNT(id) FROM objects')
        i = 0
        for (obj_id, hash_) in db.query('SELECT obj_id, hash FROM blocks JOIN objects '
                                        'ON obj_id == objects.id WHERE obj_id > ? '
                                        'ORDER BY obj_id ASC', (start_obj,)):
            if i % 100 == 0:
                log.info(' ..checked %d/%d objects..', i, total)
              
            sha = hashlib.sha256()
            with bucket.open_read("s3ql_data_%d" % obj_id) as fh:
                while True:
                    buf = fh.read(128*1024)
                    if not buf:
                        break
                    sha.update(buf)
                    
            if sha.digest() != hash_:
                log.warn('Object %d corrupted! Deleting..', obj_id)
                bucket.delete('s3ql_data_%d' % obj_id)
            i += 1
            
    except KeyboardInterrupt:
        log.info("Storing verification status...")
        bucket['s3ql_hash_check_status'] = '%d' % obj_id
        raise QuietError('Aborting..')
    
    log.info('Running fsck...')
    fsck = Fsck(tempfile.mkdtemp(), bucket, param, db)
    fsck.check()    
    
    if fsck.uncorrectable_errors:
        raise QuietError("Uncorrectable errors found, aborting.")
            
    param['revision'] = CURRENT_FS_REV
    param['seq_no'] += 1
    bucket['s3ql_seq_no_%d' % param['seq_no']] = 'Empty'

    log.info("Uploading database..")
    cycle_metadata(bucket)
    param['last-modified'] = time.time() - time.timezone
    with bucket.open_write("s3ql_metadata", param) as fh:
        dump_metadata(fh, db)
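
The verification loop above streams each object through SHA-256 in 128 KiB chunks, so memory use stays constant regardless of object size, and on interrupt it records the last object id so a re-run can resume where it left off. The chunked-hashing part in isolation (the path argument is hypothetical):

import hashlib

def sha256_file(path, chunk_size=128 * 1024):
    # Stream the file through the hash in fixed-size chunks
    sha = hashlib.sha256()
    with open(path, 'rb') as fh:
        while True:
            buf = fh.read(chunk_size)
            if not buf:
                break
            sha.update(buf)
    return sha.digest()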
Example No. 29
class DumpTests(unittest.TestCase):
    def setUp(self):
        self.tmpfh1 = tempfile.NamedTemporaryFile()
        self.tmpfh2 = tempfile.NamedTemporaryFile()
        self.src = Connection(self.tmpfh1.name)
        self.dst = Connection(self.tmpfh2.name)
        self.fh = tempfile.TemporaryFile()

        # Disable exclusive locking for all tests
        self.src.execute('PRAGMA locking_mode = NORMAL')
        self.dst.execute('PRAGMA locking_mode = NORMAL')

        self.create_table(self.src)
        self.create_table(self.dst)

    def tearDown(self):
        self.src.close()
        self.dst.close()
        self.tmpfh1.close()
        self.tmpfh2.close()
        self.fh.close()

    def test_transactions(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        self.dst.execute('PRAGMA journal_mode = WAL')

        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh, trx_rows=10)
        self.compare_tables(self.src, self.dst)

    def test_1_vals_1(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_1_vals_2(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 1),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_1_vals_3(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, -1),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_2_buf_auto(self):
        self.fill_vals(self.src)
        self.fill_buf(self.src)
        dumpspec = (('id', deltadump.INTEGER),
                    ('buf', deltadump.BLOB))
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_2_buf_fixed(self):
        BUFLEN = 32
        self.fill_vals(self.src)
        self.fill_buf(self.src, BUFLEN)
        dumpspec = (('id', deltadump.INTEGER),
                    ('buf', deltadump.BLOB, BUFLEN))
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_3_deltas_1(self):
        self.fill_deltas(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_3_deltas_2(self):
        self.fill_deltas(self.src)
        dumpspec = (('id', deltadump.INTEGER, 1),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_3_deltas_3(self):
        self.fill_deltas(self.src)
        dumpspec = (('id', deltadump.INTEGER, -1),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_4_time(self):
        self.fill_vals(self.src)

        t1 = 0.5 * time.time()
        t2 = 2 * time.time()
        for (id_,) in self.src.query('SELECT id FROM test'):
            val = random.uniform(t1, t2)
            self.src.execute('UPDATE test SET buf=? WHERE id=?', (val, id_))

        dumpspec = (('id', deltadump.INTEGER),
                    ('buf', deltadump.TIME))

        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)

        self.compare_tables(self.src, self.dst)


    def test_5_multi(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.dst.execute('DELETE FROM test')
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)


    def compare_tables(self, db1, db2):
        i1 = db1.query('SELECT id, buf FROM test ORDER BY id')
        i2 = db2.query('SELECT id, buf FROM test ORDER BY id')

        for (id1, buf1) in i1:
            (id2, buf2) = next(i2)

            self.assertEqual(id1, id2)
            if isinstance(buf1, float):
                self.assertAlmostEqual(buf1, buf2, places=9)
            else:
                self.assertEqual(buf1, buf2)

        self.assertRaises(StopIteration, i2.__next__)

    def fill_buf(self, db, len_=None):
        with open('/dev/urandom', 'rb') as rfh:
            first = True
            for (id_,) in db.query('SELECT id FROM test'):
                if len_ is None and first:
                    val = b'' # We always want to check this case
                    first = False
                elif len_ is None:
                    val = rfh.read(random.randint(0, 140))
                else:
                    val = rfh.read(len_)

                db.execute('UPDATE test SET buf=? WHERE id=?', (val, id_))

    def fill_vals(self, db):
        vals = []
        for exp in [7, 8, 9, 15, 16, 17, 31, 32, 33, 62]:
            vals += list(range(2 ** exp - 5, 2 ** exp + 6))
        vals += list(range(2 ** 63 - 5, 2 ** 63))
        vals += [ -v for v  in vals ]
        vals.append(-(2 ** 63))

        for val in vals:
            db.execute('INSERT INTO test (id) VALUES(?)', (val,))

    def fill_deltas(self, db):
        deltas = []
        for exp in [7, 8, 9, 15, 16, 17, 31, 32, 33]:
            deltas += list(range(2 ** exp - 5, 2 ** exp + 6))
        deltas += [ -v for v  in deltas ]

        last = 0
        for delta in deltas:
            val = last + delta
            last = val
            db.execute('INSERT INTO test (id) VALUES(?)', (val,))

    def create_table(self, db):
        db.execute('''CREATE TABLE test (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            buf BLOB)''')
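
The dumpspec tuples above name a column, its type, and an integer parameter that tunes deltadump's integer packing; the payoff deltadump exploits is that sorted ids compress far better as differences between consecutive rows than as absolute values. A toy round-trip of plain delta coding (this illustrates the idea only, not the actual deltadump wire format):

def delta_encode(values):
    # Replace each value with its difference from the previous one
    last, out = 0, []
    for v in values:
        out.append(v - last)
        last = v
    return out

def delta_decode(deltas):
    last, out = 0, []
    for d in deltas:
        last += d
        out.append(last)
    return out

ids = [100, 101, 103, 110]
assert delta_encode(ids) == [100, 1, 2, 7]
assert delta_decode(delta_encode(ids)) == ids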
Example No. 30
class cache_tests(unittest.TestCase):

    def setUp(self):

        self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        self.backend_pool = BackendPool(lambda: local.Backend('local://' + self.backend_dir,
                                                           None, None))

        self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Create an inode we can work with
        self.inode = 42
        self.db.execute("INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount,size) "
                        "VALUES (?,?,?,?,?,?,?,?,?)",
                        (self.inode, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                         | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
                         os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1, 32))

        cache = BlockCache(self.backend_pool, self.db, self.cachedir + "/cache",
                           self.max_obj_size * 100)
        self.cache = cache

        # Monkeypatch around the need for removal and upload threads
        cache.to_remove = DummyQueue(cache)

        class DummyDistributor:
            def put(self, arg, timeout=None):
                cache._do_upload(*arg)
                return True
        cache.to_upload = DummyDistributor()

        # Tested methods assume that they are called from the
        # file system request handler
        llfuse.lock.acquire()

    def tearDown(self):
        llfuse.lock.release()
        self.cache.backend_pool = self.backend_pool
        self.cache.destroy()
        shutil.rmtree(self.cachedir)
        shutil.rmtree(self.backend_dir)
        self.dbfile.close()
        os.unlink(self.dbfile.name)

    def test_thread_hang(self):
        # Make sure that we don't deadlock if upload threads or removal
        # threads have died and we try to expire or terminate

        # Monkeypatch to avoid error messages about uncaught exceptions
        # in other threads
        upload_exc = False
        removal_exc = False
        def _upload_loop(*a, fn=self.cache._upload_loop):
            try:
                return fn(*a)
            except NotADirectoryError:
                nonlocal upload_exc
                upload_exc = True
        def _removal_loop(*a, fn=self.cache._removal_loop):
            try:
                return fn(*a)
            except NotADirectoryError:
                nonlocal removal_exc
                removal_exc = True
        self.cache._upload_loop = _upload_loop
        self.cache._removal_loop = _removal_loop

        # Start threads
        self.cache.init(threads=3)

        # Create first object (we'll try to remove that)
        with self.cache.get(self.inode, 0) as fh:
            fh.write(b'bar wurfz!')
        self.cache.commit()
        self.cache.wait()

        # Make sure that upload and removal will fail
        os.rename(self.backend_dir, self.backend_dir + '-tmp')
        open(self.backend_dir, 'w').close()

        # Create second object (we'll try to upload that)
        with self.cache.get(self.inode, 1) as fh:
            fh.write(b'bar wurfz number two!')

        # Schedule a removal
        self.cache.remove(self.inode, 0)

        try:
            # Try to clean up (implicitly calls expire)
            with llfuse.lock_released, \
                catch_logmsg('Unable to flush cache, no upload threads left alive',
                              level=logging.ERROR, count=1):
                with pytest.raises(OSError) as exc_info:
                    self.cache.destroy()
                assert exc_info.value.errno == errno.ENOTEMPTY
            assert upload_exc
            assert removal_exc
        finally:
            # Fix backend dir
            os.unlink(self.backend_dir)
            os.rename(self.backend_dir + '-tmp', self.backend_dir)

            # Remove objects from cache and make final destroy
            # call into no-op.
            self.cache.remove(self.inode, 1)
            self.cache.destroy = lambda: None


    @staticmethod
    def random_data(len_):
        with open("/dev/urandom", "rb") as fh:
            return fh.read(len_)

    def test_get(self):
        inode = self.inode
        blockno = 11
        data = self.random_data(int(0.5 * self.max_obj_size))

        # Case 1: Object does not exist yet
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            fh.write(data)

        # Case 2: Object is in cache
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            self.assertEqual(data, fh.read(len(data)))

        # Case 3: Object needs to be downloaded
        self.cache.clear()
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            self.assertEqual(data, fh.read(len(data)))

    def test_expire(self):
        inode = self.inode

        # Define the 4 most recently accessed ones
        most_recent = [7, 11, 10, 8]
        for i in most_recent:
            safe_sleep(0.2)
            with self.cache.get(inode, i) as fh:
                fh.write(('%d' % i).encode())

        # And some others
        for i in range(20):
            if i in most_recent:
                continue
            with self.cache.get(inode, i) as fh:
                fh.write(('%d' % i).encode())

        # Flush the 2 most recently accessed ones
        commit(self.cache, inode, most_recent[-2])
        commit(self.cache, inode, most_recent[-3])

        # We want to expire 4 entries, 2 of which are already flushed
        self.cache.cache.max_entries = 16
        self.cache.backend_pool = TestBackendPool(self.backend_pool, no_write=2)
        self.cache.expire()
        self.cache.backend_pool.verify()
        self.assertEqual(len(self.cache.cache), 16)

        for i in range(20):
            if i in most_recent:
                self.assertTrue((inode, i) not in self.cache.cache)
            else:
                self.assertTrue((inode, i) in self.cache.cache)

    def test_upload(self):
        inode = self.inode
        datalen = int(0.1 * self.cache.cache.max_size)
        blockno1 = 21
        blockno2 = 25
        blockno3 = 7

        data1 = self.random_data(datalen)
        data2 = self.random_data(datalen)
        data3 = self.random_data(datalen)

        # Case 1: create new object
        self.cache.backend_pool = TestBackendPool(self.backend_pool, no_write=1)
        with self.cache.get(inode, blockno1) as fh:
            fh.seek(0)
            fh.write(data1)
            el1 = fh
        self.cache.upload(el1)
        self.cache.backend_pool.verify()

        # Case 2: Link new object
        self.cache.backend_pool = TestBackendPool(self.backend_pool)
        with self.cache.get(inode, blockno2) as fh:
            fh.seek(0)
            fh.write(data1)
            el2 = fh
        self.cache.upload(el2)
        self.cache.backend_pool.verify()

        # Case 3: Upload old object, still has references
        self.cache.backend_pool = TestBackendPool(self.backend_pool, no_write=1)
        with self.cache.get(inode, blockno1) as fh:
            fh.seek(0)
            fh.write(data2)
        self.cache.upload(el1)
        self.cache.backend_pool.verify()

        # Case 4: Upload old object, no references left
        self.cache.backend_pool = TestBackendPool(self.backend_pool, no_del=1, no_write=1)
        with self.cache.get(inode, blockno2) as fh:
            fh.seek(0)
            fh.write(data3)
        self.cache.upload(el2)
        self.cache.backend_pool.verify()

        # Case 5: Link old object, no references left
        self.cache.backend_pool = TestBackendPool(self.backend_pool, no_del=1)
        with self.cache.get(inode, blockno2) as fh:
            fh.seek(0)
            fh.write(data2)
        self.cache.upload(el2)
        self.cache.backend_pool.verify()

        # Case 6: Link old object, still has references
        # (Need to create another object first)
        self.cache.backend_pool = TestBackendPool(self.backend_pool, no_write=1)
        with self.cache.get(inode, blockno3) as fh:
            fh.seek(0)
            fh.write(data1)
            el3 = fh
        self.cache.upload(el3)
        self.cache.backend_pool.verify()

        self.cache.backend_pool = TestBackendPool(self.backend_pool)
        with self.cache.get(inode, blockno1) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.upload(el1)
        self.cache.clear()
        self.cache.backend_pool.verify()

    def test_remove_referenced(self):
        inode = self.inode
        datalen = int(0.1 * self.cache.cache.max_size)
        blockno1 = 21
        blockno2 = 24
        data = self.random_data(datalen)

        self.cache.backend_pool = TestBackendPool(self.backend_pool, no_write=1)
        with self.cache.get(inode, blockno1) as fh:
            fh.seek(0)
            fh.write(data)
        with self.cache.get(inode, blockno2) as fh:
            fh.seek(0)
            fh.write(data)
        self.cache.clear()
        self.cache.backend_pool.verify()

        self.cache.backend_pool = TestBackendPool(self.backend_pool)
        self.cache.remove(inode, blockno1)
        self.cache.backend_pool.verify()

    def test_remove_cache(self):
        inode = self.inode
        data1 = self.random_data(int(0.4 * self.max_obj_size))

        # Case 1: Elements only in cache
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.remove(inode, 1)
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            self.assertEqual(fh.read(42), b'')

    def test_upload_race(self):
        inode = self.inode
        blockno = 1
        data1 = self.random_data(int(0.4 * self.max_obj_size))
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            fh.write(data1)

        # Remove it
        self.cache.remove(inode, blockno)

        # Try to upload it, may happen if CommitThread is interrupted
        self.cache.upload(fh)

    def test_expire_race(self):
        # Create element
        inode = self.inode
        blockno = 1
        data1 = self.random_data(int(0.4 * self.max_obj_size))
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.upload(fh)

        # Make sure entry will be expired
        self.cache.cache.max_entries = 0
        def e_w_l():
            with llfuse.lock:
                self.cache.expire()

        # Lock it
        self.cache._lock_entry(inode, blockno, release_global=True)

        try:
            # Start expiration, will block on lock
            t1 = AsyncFn(e_w_l)
            t1.start()

            # Start second expiration, will block
            t2 = AsyncFn(e_w_l)
            t2.start()

            # Release lock
            with llfuse.lock_released:
                safe_sleep(0.1)
                self.cache._unlock_entry(inode, blockno)
                t1.join_and_raise()
                t2.join_and_raise()

            assert len(self.cache.cache) == 0
        finally:
            self.cache._unlock_entry(inode, blockno, release_global=True,
                                     noerror=True)


    def test_parallel_expire(self):
        # Create elements
        inode = self.inode
        for i in range(5):
            data1 = self.random_data(int(0.4 * self.max_obj_size))
            with self.cache.get(inode, i) as fh:
                fh.write(data1)

        # We want to expire just one element, but have
        # several threads running expire() simultaneously
        self.cache.cache.max_entries = 4
        def e_w_l():
            with llfuse.lock:
                self.cache.expire()

        # Lock first element so that we have time to start threads
        self.cache._lock_entry(inode, 0, release_global=True)

        try:
            # Start expiration, will block on lock
            t1 = AsyncFn(e_w_l)
            t1.start()

            # Start second expiration, will block
            t2 = AsyncFn(e_w_l)
            t2.start()

            # Release lock
            with llfuse.lock_released:
                safe_sleep(0.1)
                self.cache._unlock_entry(inode, 0)
                t1.join_and_raise()
                t2.join_and_raise()

            assert len(self.cache.cache) == 4
        finally:
            self.cache._unlock_entry(inode, 0, release_global=True,
                                     noerror=True)


    def test_remove_cache_db(self):
        inode = self.inode
        data1 = self.random_data(int(0.4 * self.max_obj_size))

        # Case 2: Element in cache and db
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.backend_pool = TestBackendPool(self.backend_pool, no_write=1)
        commit(self.cache, inode)
        self.cache.backend_pool.verify()
        self.cache.backend_pool = TestBackendPool(self.backend_pool, no_del=1)
        self.cache.remove(inode, 1)
        self.cache.backend_pool.verify()

        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            self.assertEqual(fh.read(42), b'')


    def test_remove_db(self):
        inode = self.inode
        data1 = self.random_data(int(0.4 * self.max_obj_size))

        # Case 3: Element only in DB
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.backend_pool = TestBackendPool(self.backend_pool, no_write=1)
        self.cache.clear()
        self.cache.backend_pool.verify()
        self.cache.backend_pool = TestBackendPool(self.backend_pool, no_del=1)
        self.cache.remove(inode, 1)
        self.cache.backend_pool.verify()
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            self.assertEqual(fh.read(42), b'')
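
Both copies of cache_tests (this one and the newer variant that follows) swap the cache's upload distributor for a stub whose put() performs the upload inline, so no worker threads are needed and failures surface directly in the test thread. The pattern generalizes to any producer/consumer seam; a minimal sketch with a hypothetical handler:

class SynchronousQueue:
    """Stand-in for a thread-backed queue: put() runs the work inline."""

    def __init__(self, handler):
        self.handler = handler

    def put(self, item, timeout=None):
        self.handler(item)
        return True

# Usage mirroring the DummyDistributor above:
#   cache.to_upload = SynchronousQueue(lambda arg: cache._do_upload(*arg))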
Example No. 31
class cache_tests(unittest.TestCase):
    def setUp(self):

        self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        self.backend_pool = BackendPool(lambda: local.Backend(
            Namespace(storage_url='local://' + self.backend_dir)))

        self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Create an inode we can work with
        self.inode = 42
        now_ns = time_ns()
        self.db.execute(
            "INSERT INTO inodes (id,mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?,?)",
            (self.inode,
             stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
             | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
             os.getuid(), os.getgid(), now_ns, now_ns, now_ns, 1, 32))

        cache = BlockCache(self.backend_pool, self.db,
                           self.cachedir + "/cache", self.max_obj_size * 100)
        self.cache = cache

        # Monkeypatch around the need for removal and upload threads
        cache.to_remove = DummyQueue(cache)

        class DummyDistributor:
            def put(self, arg, timeout=None):
                cache._do_upload(*arg)
                return True

        cache.to_upload = DummyDistributor()

        # Tested methods assume that they are called from the
        # file system request handler
        s3ql.block_cache.lock = MockLock()
        s3ql.block_cache.lock_released = MockLock()

    def tearDown(self):
        self.cache.backend_pool = self.backend_pool
        self.cache.destroy()
        shutil.rmtree(self.cachedir)
        shutil.rmtree(self.backend_dir)
        self.dbfile.close()
        os.unlink(self.dbfile.name)

    def test_thread_hang(self):
        # Make sure that we don't deadlock if upload threads or removal
        # threads have died and we try to expire or terminate

        # Monkeypatch to avoid error messages about uncaught exceptions
        # in other threads
        upload_exc = False
        removal_exc = False

        def _upload_loop(*a, fn=self.cache._upload_loop):
            try:
                return fn(*a)
            except NotADirectoryError:
                nonlocal upload_exc
                upload_exc = True

        def _removal_loop(*a, fn=self.cache._removal_loop):
            try:
                return fn(*a)
            except NotADirectoryError:
                nonlocal removal_exc
                removal_exc = True

        self.cache._upload_loop = _upload_loop
        self.cache._removal_loop = _removal_loop

        # Start threads
        self.cache.init(threads=3)

        # Create first object (we'll try to remove that)
        with self.cache.get(self.inode, 0) as fh:
            fh.write(b'bar wurfz!')
        self.cache.start_flush()
        self.cache.wait()

        # Make sure that upload and removal will fail
        os.rename(self.backend_dir, self.backend_dir + '-tmp')
        open(self.backend_dir, 'w').close()

        # Create second object (we'll try to upload that)
        with self.cache.get(self.inode, 1) as fh:
            fh.write(b'bar wurfz number two!')

        # Schedule a removal
        self.cache.remove(self.inode, 0)

        try:
            # Try to clean up (implicitly calls expire)
            with assert_logs(
                    'Unable to drop cache, no upload threads left alive',
                    level=logging.ERROR,
                    count=1):
                with pytest.raises(OSError) as exc_info:
                    self.cache.destroy()
                assert exc_info.value.errno == errno.ENOTEMPTY
            assert upload_exc
            assert removal_exc
        finally:
            # Fix backend dir
            os.unlink(self.backend_dir)
            os.rename(self.backend_dir + '-tmp', self.backend_dir)

            # Remove objects from cache and make final destroy
            # call into no-op.
            self.cache.remove(self.inode, 1)
            self.cache.destroy = lambda: None

    @staticmethod
    def random_data(len_):
        with open("/dev/urandom", "rb") as fh:
            return fh.read(len_)

    def test_get(self):
        inode = self.inode
        blockno = 11
        data = self.random_data(int(0.5 * self.max_obj_size))

        # Case 1: Object does not exist yet
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            fh.write(data)

        # Case 2: Object is in cache
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            self.assertEqual(data, fh.read(len(data)))

        # Case 3: Object needs to be downloaded
        self.cache.drop()
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            self.assertEqual(data, fh.read(len(data)))

    def test_expire(self):
        inode = self.inode

        # Define the 4 most recently accessed ones
        most_recent = [7, 11, 10, 8]
        for i in most_recent:
            safe_sleep(0.2)
            with self.cache.get(inode, i) as fh:
                fh.write(('%d' % i).encode())

        # And some others
        for i in range(20):
            if i in most_recent:
                continue
            with self.cache.get(inode, i) as fh:
                fh.write(('%d' % i).encode())

        # Flush the 2 most recently accessed ones
        start_flush(self.cache, inode, most_recent[-2])
        start_flush(self.cache, inode, most_recent[-3])

        # We want to expire 4 entries, 2 of which are already flushed
        self.cache.cache.max_entries = 16
        self.cache.backend_pool = MockBackendPool(self.backend_pool,
                                                  no_write=2)
        self.cache.expire()
        self.cache.backend_pool.verify()
        self.assertEqual(len(self.cache.cache), 16)

        for i in range(20):
            if i in most_recent:
                self.assertTrue((inode, i) not in self.cache.cache)
            else:
                self.assertTrue((inode, i) in self.cache.cache)

    def test_upload(self):
        inode = self.inode
        datalen = int(0.1 * self.cache.cache.max_size)
        blockno1 = 21
        blockno2 = 25
        blockno3 = 7

        data1 = self.random_data(datalen)
        data2 = self.random_data(datalen)
        data3 = self.random_data(datalen)

        # Case 1: create new object
        self.cache.backend_pool = MockBackendPool(self.backend_pool,
                                                  no_write=1)
        with self.cache.get(inode, blockno1) as fh:
            fh.seek(0)
            fh.write(data1)
            el1 = fh
        assert self.cache.upload_if_dirty(el1)
        self.cache.backend_pool.verify()

        # Case 2: Link new object
        self.cache.backend_pool = MockBackendPool(self.backend_pool)
        with self.cache.get(inode, blockno2) as fh:
            fh.seek(0)
            fh.write(data1)
            el2 = fh
        assert not self.cache.upload_if_dirty(el2)
        self.cache.backend_pool.verify()

        # Case 3: Upload old object, still has references
        self.cache.backend_pool = MockBackendPool(self.backend_pool,
                                                  no_write=1)
        with self.cache.get(inode, blockno1) as fh:
            fh.seek(0)
            fh.write(data2)
        assert self.cache.upload_if_dirty(el1)
        self.cache.backend_pool.verify()

        # Case 4: Upload old object, no references left
        self.cache.backend_pool = MockBackendPool(self.backend_pool,
                                                  no_del=1,
                                                  no_write=1)
        with self.cache.get(inode, blockno2) as fh:
            fh.seek(0)
            fh.write(data3)
        assert self.cache.upload_if_dirty(el2)
        self.cache.backend_pool.verify()

        # Case 5: Link old object, no references left
        self.cache.backend_pool = MockBackendPool(self.backend_pool, no_del=1)
        with self.cache.get(inode, blockno2) as fh:
            fh.seek(0)
            fh.write(data2)
        assert not self.cache.upload_if_dirty(el2)
        self.cache.backend_pool.verify()

        # Case 6: Link old object, still has references
        # (Need to create another object first)
        self.cache.backend_pool = MockBackendPool(self.backend_pool,
                                                  no_write=1)
        with self.cache.get(inode, blockno3) as fh:
            fh.seek(0)
            fh.write(data1)
            el3 = fh
        assert self.cache.upload_if_dirty(el3)
        self.cache.backend_pool.verify()

        self.cache.backend_pool = MockBackendPool(self.backend_pool)
        with self.cache.get(inode, blockno1) as fh:
            fh.seek(0)
            fh.write(data1)
        assert not self.cache.upload_if_dirty(el1)
        self.cache.drop()
        self.cache.backend_pool.verify()

    def test_remove_referenced(self):
        inode = self.inode
        datalen = int(0.1 * self.cache.cache.max_size)
        blockno1 = 21
        blockno2 = 24
        data = self.random_data(datalen)

        self.cache.backend_pool = MockBackendPool(self.backend_pool,
                                                  no_write=1)
        with self.cache.get(inode, blockno1) as fh:
            fh.seek(0)
            fh.write(data)
        with self.cache.get(inode, blockno2) as fh:
            fh.seek(0)
            fh.write(data)
        self.cache.drop()
        self.cache.backend_pool.verify()

        self.cache.backend_pool = MockBackendPool(self.backend_pool)
        self.cache.remove(inode, blockno1)
        self.cache.backend_pool.verify()

    def test_remove_cache(self):
        inode = self.inode
        data1 = self.random_data(int(0.4 * self.max_obj_size))

        # Case 1: Elements only in cache
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.remove(inode, 1)
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            self.assertEqual(fh.read(42), b'')

    def test_upload_race(self):
        inode = self.inode
        blockno = 1
        data1 = self.random_data(int(0.4 * self.max_obj_size))
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            fh.write(data1)

        # Remove it
        self.cache.remove(inode, blockno)

        # Try to upload it, may happen if CommitThread is interrupted
        self.cache.upload_if_dirty(fh)

    def test_expire_race(self):
        # Create element
        inode = self.inode
        blockno = 1
        data1 = self.random_data(int(0.4 * self.max_obj_size))
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            fh.write(data1)
        assert self.cache.upload_if_dirty(fh)

        # Make sure entry will be expired
        self.cache.cache.max_entries = 0

        # Lock it
        self.cache._lock_entry(inode, blockno, release_global=True)

        try:
            # Start expiration, will block on lock
            t1 = AsyncFn(self.cache.expire)
            t1.start()

            # Start second expiration, will block
            t2 = AsyncFn(self.cache.expire)
            t2.start()

            # Release lock
            self.cache._unlock_entry(inode, blockno)
            t1.join_and_raise()
            t2.join_and_raise()

            assert len(self.cache.cache) == 0
        finally:
            self.cache._unlock_entry(inode,
                                     blockno,
                                     release_global=True,
                                     noerror=True)

    def test_parallel_expire(self):
        # Create elements
        inode = self.inode
        for i in range(5):
            data1 = self.random_data(int(0.4 * self.max_obj_size))
            with self.cache.get(inode, i) as fh:
                fh.write(data1)

        # We want to expire just one element, but have
        # several threads running expire() simultaneously
        self.cache.cache.max_entries = 4

        # Lock first element so that we have time to start threads
        self.cache._lock_entry(inode, 0, release_global=True)

        try:
            # Start expiration, will block on lock
            t1 = AsyncFn(self.cache.expire)
            t1.start()

            # Start second expiration, will block
            t2 = AsyncFn(self.cache.expire)
            t2.start()

            # Release lock
            self.cache._unlock_entry(inode, 0)
            t1.join_and_raise()
            t2.join_and_raise()

            assert len(self.cache.cache) == 4
        finally:
            self.cache._unlock_entry(inode,
                                     0,
                                     release_global=True,
                                     noerror=True)

    def test_remove_cache_db(self):
        inode = self.inode
        data1 = self.random_data(int(0.4 * self.max_obj_size))

        # Case 2: Element in cache and db
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.backend_pool = MockBackendPool(self.backend_pool,
                                                  no_write=1)
        start_flush(self.cache, inode)
        self.cache.backend_pool.verify()
        self.cache.backend_pool = MockBackendPool(self.backend_pool, no_del=1)
        self.cache.remove(inode, 1)
        self.cache.backend_pool.verify()

        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            self.assertEqual(fh.read(42), b'')

    def test_remove_db(self):
        inode = self.inode
        data1 = self.random_data(int(0.4 * self.max_obj_size))

        # Case 3: Element only in DB
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.backend_pool = MockBackendPool(self.backend_pool,
                                                  no_write=1)
        self.cache.drop()
        self.cache.backend_pool.verify()
        self.cache.backend_pool = MockBackendPool(self.backend_pool, no_del=1)
        self.cache.remove(inode, 1)
        self.cache.backend_pool.verify()
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            self.assertEqual(fh.read(42), b'')

    def test_issue_241(self):

        inode = self.inode

        # Create block
        with self.cache.get(inode, 0) as fh:
            fh.write(self.random_data(500))

        # "Fill" cache
        self.cache.cache.max_entries = 0

        # Mock locking to reproduce race condition
        mlock = MockMultiLock(self.cache.mlock)
        with patch.object(self.cache, 'mlock', mlock):
            # Start first expiration run, will block in upload
            thread1 = AsyncFn(self.cache.expire)
            thread1.start()

            # Remove the object while the expiration thread waits
            # for it to become available.
            thread2 = AsyncFn(self.cache.remove, inode, 0, 1)
            thread2.start()
            mlock.yield_to(thread2)
            thread2.join_and_raise(timeout=10)
            assert not thread2.is_alive()

        # Create a new object for the same block
        with self.cache.get(inode, 0) as fh:
            fh.write(self.random_data(500))

        # Continue first expiration run
        mlock.yield_to(thread1, block=False)
        thread1.join_and_raise(timeout=10)
        assert not thread1.is_alive()
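
The concurrency tests above rely on an AsyncFn helper that runs a callable in a background thread and re-raises any exception when the caller joins. s3ql ships its own implementation; the following is only a minimal sketch of the contract these tests assume (everything beyond the AsyncFn/join_and_raise names is invented here for illustration):

import threading

class AsyncFn(threading.Thread):
    '''Run a callable in a thread; re-raise its exception on join_and_raise().'''

    def __init__(self, fn, *args, **kwargs):
        super().__init__()
        self._fn = fn
        self._args = args
        self._kwargs = kwargs
        self._exc = None

    def run(self):
        try:
            self._fn(*self._args, **self._kwargs)
        except BaseException as exc:
            # Saved here, raised later in the joining thread
            self._exc = exc

    def join_and_raise(self, timeout=None):
        self.join(timeout)
        if self._exc is not None:
            raise self._exc

With this contract, t1.join_and_raise() fails the test case whenever expire() raised inside its thread, instead of silently losing the exception.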
Example #32
def main(args=None):

    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)

    # Check for cached metadata
    cachepath = options.cachepath
    if not os.path.exists(cachepath + '.params'):
        raise QuietError("No local metadata found.")

    param = load_params(cachepath)

    # Check revision
    if param['revision'] < CURRENT_FS_REV:
        raise QuietError('File system revision too old.')
    elif param['revision'] > CURRENT_FS_REV:
        raise QuietError('File system revision too new.')

    if os.path.exists(DBNAME):
        raise QuietError('%s exists, aborting.' % DBNAME)

    log.info('Copying database...')
    dst = tempfile.NamedTemporaryFile()
    with open(cachepath + '.db', 'rb') as src:
        shutil.copyfileobj(src, dst)
    dst.flush()
    db = Connection(dst.name)

    log.info('Scrambling...')
    md5 = lambda x: hashlib.md5(x).hexdigest()
    for (id_, name) in db.query('SELECT id, name FROM names'):
        db.execute('UPDATE names SET name=? WHERE id=?',
                   (md5(name), id_))

    for (inode, target) in db.query('SELECT inode, target FROM symlink_targets'):
        db.execute('UPDATE symlink_targets SET target=? WHERE inode=?',
                   (md5(target), inode))

    for (rowid, value) in db.query('SELECT rowid, value FROM ext_attributes'):
        db.execute('UPDATE ext_attributes SET value=? WHERE rowid=?',
                   (md5(value), rowid))

    log.info('Saving...')
    with open(DBNAME, 'wb+') as fh:
        dump_metadata(db, fh)
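
The scrambling loops above can be tried in isolation. The sketch below uses the stdlib sqlite3 module instead of s3ql's apsw-based Connection (an assumption made only to keep the example self-contained); like the original, it replaces every stored name with its MD5 hex digest, so rows stay distinct while the original strings become unrecoverable:

import hashlib
import sqlite3

def scramble_names(db_path):
    db = sqlite3.connect(db_path)
    md5 = lambda x: hashlib.md5(x).hexdigest()
    # Materialize the rows first so the UPDATEs do not disturb the scan
    rows = db.execute('SELECT id, name FROM names').fetchall()
    for (id_, name) in rows:
        if isinstance(name, str):
            name = name.encode('utf-8')  # md5 needs bytes
        db.execute('UPDATE names SET name=? WHERE id=?', (md5(name), id_))
    db.commit()
    db.close()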
Example #33
class fsck_tests(TestCase):

    def setUp(self):
        self.bucket_dir = tempfile.mkdtemp()
        self.bucket = local.Bucket(self.bucket_dir, None, None)
        self.cachedir = tempfile.mkdtemp() + "/"
        self.blocksize = 1024

        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        self.fsck = Fsck(self.cachedir, self.bucket,
                  { 'blocksize': self.blocksize }, self.db)
        self.fsck.expect_errors = True

    def tearDown(self):
        shutil.rmtree(self.cachedir)
        shutil.rmtree(self.bucket_dir)

    def assert_fsck(self, fn):
        '''Check that fn detects and corrects an error'''

        self.fsck.found_errors = False
        fn()
        self.assertTrue(self.fsck.found_errors)
        self.fsck.found_errors = False
        fn()
        self.assertFalse(self.fsck.found_errors)

    def test_cache(self):
        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
                              "VALUES (?,?,?,?,?,?,?)",
                              (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                               | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
                               os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))

        # Create new block
        fh = open(self.cachedir + '%d-1' % inode, 'wb')
        fh.write('somedata')
        fh.close()
        self.assert_fsck(self.fsck.check_cache)
        self.assertEqual(self.bucket['s3ql_data_1'], 'somedata')

        # This should be ignored
        fh = open(self.cachedir + '%d-1' % inode, 'wb')
        fh.write('otherdata')
        fh.close()
        self.assert_fsck(self.fsck.check_cache)
        self.assertEqual(self.bucket['s3ql_data_1'], 'somedata')

        # Existing block
        with open(self.cachedir + '%d-2' % inode, 'wb') as fh:
            fh.write('somedata')
        self.assert_fsck(self.fsck.check_cache)

        # Old block preserved
        with open(self.cachedir + '%d-1' % inode, 'wb') as fh:
            fh.write('overwriting somedata')
        self.assert_fsck(self.fsck.check_cache)

        # Old block removed
        with open(self.cachedir + '%d-2' % inode, 'wb') as fh:
            fh.write('overwriting last piece of somedata')
        self.assert_fsck(self.fsck.check_cache)

    def test_lof1(self):

        # Make lost+found a file
        inode = self.db.get_val("SELECT inode FROM contents_v WHERE name=? AND parent_inode=?",
                                (b"lost+found", ROOT_INODE))
        self.db.execute('DELETE FROM contents WHERE parent_inode=?', (inode,))
        self.db.execute('UPDATE inodes SET mode=?, size=? WHERE id=?',
                        (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, inode))

        self.assert_fsck(self.fsck.check_lof)

    def test_lof2(self):
        # Remove lost+found
        name_id = self.db.get_val('SELECT id FROM names WHERE name=?', (b'lost+found',))
        self.db.execute('DELETE FROM contents WHERE name_id=? and parent_inode=?',
                        (name_id, ROOT_INODE))

        self.assert_fsck(self.fsck.check_lof)

    def test_wrong_inode_refcount(self):
    
        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                              "VALUES (?,?,?,?,?,?,?,?)",
                              (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                               0, 0, time.time(), time.time(), time.time(), 1, 0))
        self._link('name1', inode)
        self._link('name2', inode)
        self.assert_fsck(self.fsck.check_inode_refcount)

    def test_orphaned_inode(self):
        
        self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                      "VALUES (?,?,?,?,?,?,?,?)",
                      (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                       0, 0, time.time(), time.time(), time.time(), 1, 0))
        self.assert_fsck(self.fsck.check_inode_refcount)
        
    def test_name_refcount(self):

        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                              "VALUES (?,?,?,?,?,?,?,?)",
                              (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                               0, 0, time.time(), time.time(), time.time(), 1, 0))
        self._link('name1', inode)
        self._link('name2', inode)
        
        self.db.execute('UPDATE names SET refcount=refcount+1 WHERE name=?', ('name1',))
        
        self.assert_fsck(self.fsck.check_name_refcount)

    def test_orphaned_name(self):

        self._add_name('zupbrazl')
        self.assert_fsck(self.fsck.check_name_refcount)
            
    def test_ref_integrity(self):

        self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
                        (self._add_name('foobar'), 124, ROOT_INODE))
        
        self.fsck.found_errors = False
        self.fsck.check_foreign_keys()
        self.assertTrue(self.fsck.found_errors)
                
    def _add_name(self, name):
        '''Get id for *name* and increase refcount
        
        Name is inserted in table if it does not yet exist.
        '''
        
        try:
            name_id = self.db.get_val('SELECT id FROM names WHERE name=?', (name,))
        except NoSuchRowError:
            name_id = self.db.rowid('INSERT INTO names (name, refcount) VALUES(?,?)',
                                    (name, 1))
        else:
            self.db.execute('UPDATE names SET refcount=refcount+1 WHERE id=?', (name_id,))
        return name_id
            
    def _link(self, name, inode):
        '''Link /*name* to *inode*'''

        self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
                        (self._add_name(name), inode, ROOT_INODE))
                
    def test_inode_sizes(self):

        id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                            "VALUES (?,?,?,?,?,?,?,?)",
                            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                             0, 0, time.time(), time.time(), time.time(), 1, 128))
        self._link('test-entry', id_)

        obj_id = self.db.rowid('INSERT INTO objects (refcount) VALUES(1)')
        block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                                 (1, obj_id, 512))
        
        # Case 1
        self.db.execute('UPDATE inodes SET block_id=?, size=? WHERE id=?',
                        (None, self.blocksize + 120, id_))
        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
                        (id_, 1, block_id))
        self.assert_fsck(self.fsck.check_inode_sizes)

        # Case 2
        self.db.execute('DELETE FROM inode_blocks WHERE inode=?', (id_,))
        self.db.execute('UPDATE inodes SET block_id=?, size=? WHERE id=?',
                        (block_id, 129, id_))
        self.assert_fsck(self.fsck.check_inode_sizes)

        # Case 3
        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
                        (id_, 1, block_id))
        self.db.execute('UPDATE inodes SET block_id=?, size=? WHERE id=?',
                        (block_id, self.blocksize + 120, id_))
        self.assert_fsck(self.fsck.check_inode_sizes)
        

    def test_keylist(self):
        # Create an object that only exists in the bucket
        self.bucket['s3ql_data_4364'] = 'Testdata'
        self.assert_fsck(self.fsck.check_keylist)

        # Create an object that does not exist in the bucket
        self.db.execute('INSERT INTO objects (id, refcount) VALUES(?, ?)', (34, 1))
        self.assert_fsck(self.fsck.check_keylist)

    def test_missing_obj(self):
        
        obj_id = self.db.rowid('INSERT INTO objects (refcount) VALUES(1)')
        block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                                 (1, obj_id, 128))
                
        id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size,block_id) "
                            "VALUES (?,?,?,?,?,?,?,?,?)",
                            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                             0, 0, time.time(), time.time(), time.time(), 1, 128, block_id))
        self._link('test-entry', id_)
        
        self.assert_fsck(self.fsck.check_keylist)

    @staticmethod
    def random_data(len_):
        with open("/dev/urandom", "rb") as fd:
            return fd.read(len_)

    def test_loops(self):

        # Create some directory inodes  
        inodes = [ self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
                                 "VALUES (?,?,?,?,?,?,?)",
                                 (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR,
                                  0, 0, time.time(), time.time(), time.time(), 1))
                   for dummy in range(3) ]

        inodes.append(inodes[0])
        last = inodes[0]
        for inode in inodes[1:]:
            self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?, ?, ?)',
                            (self._add_name(bytes(inode)), inode, last))
            last = inode

        self.fsck.found_errors = False
        self.fsck.check_inode_refcount()
        self.assertFalse(self.fsck.found_errors)
        self.fsck.check_loops()
        self.assertTrue(self.fsck.found_errors)
        # We can't fix loops yet

    def test_obj_refcounts(self):

        obj_id = self.db.rowid('INSERT INTO objects (refcount) VALUES(1)')
        self.db.execute('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                        (0, obj_id, 0))
        self.db.execute('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                        (0, obj_id, 0))

        self.assert_fsck(self.fsck.check_obj_refcounts)

    def test_orphaned_obj(self):

        self.db.rowid('INSERT INTO objects (refcount) VALUES(1)')
        self.assert_fsck(self.fsck.check_obj_refcounts)
        
    def test_wrong_block_refcount(self):

        obj_id = self.db.rowid('INSERT INTO objects (refcount) VALUES(1)')
        block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                                 (1, obj_id, 0))
        
        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size,block_id) "
                              "VALUES (?,?,?,?,?,?,?,?,?)",
                              (stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR, os.getuid(), os.getgid(),
                               time.time(), time.time(), time.time(), 1, 0, block_id))
            
        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                        (inode, 1, block_id))

        self.assert_fsck(self.fsck.check_block_refcount)
        
    def test_orphaned_block(self):
        
        obj_id = self.db.rowid('INSERT INTO objects (refcount) VALUES(1)')
        self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                      (1, obj_id, 0))
        self.assert_fsck(self.fsck.check_block_refcount)
                
    def test_unix_size(self):

        inode = 42
        self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount,size) "
                        "VALUES (?,?,?,?,?,?,?,?,?)",
                        (inode, stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR,
                         os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1, 0))
        self._link('test-entry', inode)

        self.fsck.found_errors = False
        self.fsck.check_inode_unix()
        self.assertFalse(self.fsck.found_errors)

        self.db.execute('UPDATE inodes SET size = 1 WHERE id=?', (inode,))
        self.fsck.check_inode_unix()
        self.assertTrue(self.fsck.found_errors)

    
    def test_unix_size_symlink(self):

        inode = 42
        target = 'some funny random string'
        self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount,size) "
                        "VALUES (?,?,?,?,?,?,?,?,?)",
                        (inode, stat.S_IFLNK | stat.S_IRUSR | stat.S_IWUSR,
                         os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1, 
                         len(target)))
        self.db.execute('INSERT INTO symlink_targets (inode, target) VALUES(?,?)', (inode, target))
        self._link('test-entry', inode)

        self.fsck.found_errors = False
        self.fsck.check_inode_unix()
        self.assertFalse(self.fsck.found_errors)

        self.db.execute('UPDATE inodes SET size = 0 WHERE id=?', (inode,))
        self.fsck.check_inode_unix()
        self.assertTrue(self.fsck.found_errors)
        
    def test_unix_target(self):

        inode = 42
        self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount) "
                     "VALUES (?,?,?,?,?,?,?,?)",
                     (inode, stat.S_IFCHR | stat.S_IRUSR | stat.S_IWUSR,
                      os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
        self._link('test-entry', inode)

        self.fsck.found_errors = False
        self.fsck.check_inode_unix()
        self.assertFalse(self.fsck.found_errors)

        self.db.execute('INSERT INTO symlink_targets (inode, target) VALUES(?,?)', (inode, 'foo'))
        self.fsck.check_inode_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_symlink_no_target(self):

        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
                              "VALUES (?,?,?,?,?,?,?)",
                              (stat.S_IFLNK | stat.S_IRUSR | stat.S_IWUSR,
                               os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
        self._link('test-entry', inode)
        self.fsck.check_inode_unix()
        self.assertTrue(self.fsck.found_errors)
        
    def test_unix_rdev(self):

        inode = 42
        self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount) "
                     "VALUES (?,?,?,?,?,?,?,?)",
                     (inode, stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR,
                      os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
        self._link('test-entry', inode)

        self.fsck.found_errors = False
        self.fsck.check_inode_unix()
        self.assertFalse(self.fsck.found_errors)

        self.db.execute('UPDATE inodes SET rdev=? WHERE id=?', (42, inode))
        self.fsck.check_inode_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_child(self):

        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
                   "VALUES (?,?,?,?,?,?,?)",
                   (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                    os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
        self._link('test-entry', inode)

        self.fsck.found_errors = False
        self.fsck.check_inode_unix()
        self.assertFalse(self.fsck.found_errors)
        self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
                        (self._add_name('foo'), ROOT_INODE, inode))
        self.fsck.check_inode_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_blocks(self):

        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
                              "VALUES (?,?,?,?,?,?,?)", 
                              (stat.S_IFSOCK | stat.S_IRUSR | stat.S_IWUSR,
                               os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
        self._link('test-entry', inode)

        self.fsck.found_errors = False
        self.fsck.check_inode_unix()
        self.assertFalse(self.fsck.found_errors)
        
        obj_id = self.db.rowid('INSERT INTO objects (refcount) VALUES(1)')
        block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                                 (1, obj_id, 0))

        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                        (inode, 1, block_id))

        self.fsck.check_inode_unix()
        self.assertTrue(self.fsck.found_errors)
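
assert_fsck() encodes the invariant every checker in this class must satisfy: a first run detects (and repairs) the corruption, and a second run finds nothing left. The same pattern works outside unittest; the names below (checker, found_errors) merely mirror the Fsck interface used above and are otherwise hypothetical:

def assert_detects_and_repairs(checker, check_fn):
    # First pass: the checker must notice and repair the damage
    checker.found_errors = False
    check_fn()
    assert checker.found_errors, 'first pass should detect the error'
    # Second pass: the repair must have stuck
    checker.found_errors = False
    check_fn()
    assert not checker.found_errors, 'second pass should be clean'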
Example #34
class cache_tests(unittest.TestCase):
    def setUp(self):
        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)

        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)
        self.cache = inode_cache.InodeCache(self.db, 0)

    def tearDown(self):
        self.cache.destroy()
        os.unlink(self.dbfile.name)

    def test_create(self):
        attrs = {
            'mode': 784,
            'refcount': 3,
            'uid': 7,
            'gid': 2,
            'size': 34674,
            'rdev': 11,
            'atime_ns': time_ns(),
            'ctime_ns': time_ns(),
            'mtime_ns': time_ns()
        }

        inode = self.cache.create_inode(**attrs)

        for key in list(attrs.keys()):
            self.assertEqual(attrs[key], getattr(inode, key))

        self.assertTrue(
            self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id, )))

    def test_del(self):
        attrs = {
            'mode': 784,
            'refcount': 3,
            'uid': 7,
            'gid': 2,
            'size': 34674,
            'rdev': 11,
            'atime_ns': time_ns(),
            'ctime_ns': time_ns(),
            'mtime_ns': time_ns()
        }
        inode = self.cache.create_inode(**attrs)
        del self.cache[inode.id]
        self.assertFalse(
            self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id, )))
        self.assertRaises(KeyError, self.cache.__delitem__, inode.id)

    def test_get(self):
        attrs = {
            'mode': 784,
            'refcount': 3,
            'uid': 7,
            'gid': 2,
            'size': 34674,
            'rdev': 11,
            'atime_ns': time_ns(),
            'ctime_ns': time_ns(),
            'mtime_ns': time_ns()
        }

        inode = self.cache.create_inode(**attrs)
        for (key, val) in attrs.items():
            self.assertEqual(getattr(inode, key), val)

        # Create another inode
        self.cache.create_inode(**attrs)

        self.db.execute('DELETE FROM inodes WHERE id=?', (inode.id, ))
        # Entry should still be in cache
        self.assertEqual(inode, self.cache[inode.id])

        # Creating more inodes pushes the entry out of the cache
        for _ in range(inode_cache.CACHE_SIZE + 1):
            self.cache.create_inode(**attrs)

        self.assertRaises(KeyError, self.cache.__getitem__, inode.id)
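
The timestamps in these tests come from a bare time_ns() call. On Python 3.7 and later this can simply be time.time_ns; a compatibility shim for older interpreters might look like the following (s3ql's own helper may differ):

import time

try:
    time_ns = time.time_ns  # Python >= 3.7
except AttributeError:
    def time_ns():
        # Lower resolution, but the same nanosecond unit
        return int(time.time() * 1e9)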
Example #35
class fsck_tests(unittest.TestCase):

    def setUp(self):
        self.backend_dir = tempfile.mkdtemp()
        self.backend = local.Backend('local://' + self.backend_dir, None, None)
        self.cachedir = tempfile.mkdtemp()
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name. 
        # Therefore, we unlink the file manually in tearDown() 
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        self.fsck = Fsck(self.cachedir, self.backend,
                  { 'max_obj_size': self.max_obj_size }, self.db)
        self.fsck.expect_errors = True

    def tearDown(self):
        shutil.rmtree(self.cachedir)
        shutil.rmtree(self.backend_dir)
        os.unlink(self.dbfile.name)

    def assert_fsck(self, fn):
        '''Check that fn detects and corrects an error'''

        self.fsck.found_errors = False
        fn()
        self.assertTrue(self.fsck.found_errors)
        self.fsck.found_errors = False
        self.fsck.check()
        self.assertFalse(self.fsck.found_errors)

    def test_cache(self):
        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                              "VALUES (?,?,?,?,?,?,?,?)",
                              (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                               | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
                               os.getuid(), os.getgid(), time.time(), time.time(), time.time(),
                               1, 8))
        self._link('test-entry', inode)

        # Create new block
        fh = open(self.cachedir + '/%d-0' % inode, 'wb')
        fh.write('somedata')
        fh.close()
        self.assert_fsck(self.fsck.check_cache)
        self.assertEqual(self.backend['s3ql_data_1'], 'somedata')

        # Existing block
        self.db.execute('UPDATE inodes SET size=? WHERE id=?',
                        (self.max_obj_size + 8, inode))
        with open(self.cachedir + '/%d-1' % inode, 'wb') as fh:
            fh.write('somedata')
        self.assert_fsck(self.fsck.check_cache)

        # Old block preserved
        with open(self.cachedir + '/%d-0' % inode, 'wb') as fh:
            fh.write('somedat2')
        self.assert_fsck(self.fsck.check_cache)

        # Old block removed
        with open(self.cachedir + '/%d-1' % inode, 'wb') as fh:
            fh.write('somedat3')
        self.assert_fsck(self.fsck.check_cache)


    def test_lof1(self):

        # Make lost+found a file
        inode = self.db.get_val("SELECT inode FROM contents_v WHERE name=? AND parent_inode=?",
                                (b"lost+found", ROOT_INODE))
        self.db.execute('DELETE FROM contents WHERE parent_inode=?', (inode,))
        self.db.execute('UPDATE inodes SET mode=?, size=? WHERE id=?',
                        (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, inode))

        def check():
            self.fsck.check_lof()
            self.fsck.check_inodes_refcount()

        self.assert_fsck(check)

    def test_lof2(self):
        # Remove lost+found
        name_id = self.db.get_val('SELECT id FROM names WHERE name=?', (b'lost+found',))
        inode = self.db.get_val('SELECT inode FROM contents WHERE name_id=? AND '
                                'parent_inode=?', (name_id, ROOT_INODE))
        self.db.execute('DELETE FROM inodes WHERE id=?', (inode,))
        self.db.execute('DELETE FROM contents WHERE name_id=? and parent_inode=?',
                        (name_id, ROOT_INODE))
        self.db.execute('UPDATE names SET refcount = refcount-1 WHERE id=?', (name_id,))

        self.assert_fsck(self.fsck.check_lof)

    def test_wrong_inode_refcount(self):

        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                              "VALUES (?,?,?,?,?,?,?,?)",
                              (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                               0, 0, time.time(), time.time(), time.time(), 1, 0))
        self._link('name1', inode)
        self._link('name2', inode)
        self.assert_fsck(self.fsck.check_inodes_refcount)

    def test_orphaned_inode(self):

        self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                      "VALUES (?,?,?,?,?,?,?,?)",
                      (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                       0, 0, time.time(), time.time(), time.time(), 1, 0))
        self.assert_fsck(self.fsck.check_inodes_refcount)

    def test_name_refcount(self):

        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                              "VALUES (?,?,?,?,?,?,?,?)",
                              (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                               0, 0, time.time(), time.time(), time.time(), 2, 0))
        self._link('name1', inode)
        self._link('name2', inode)

        self.db.execute('UPDATE names SET refcount=refcount+1 WHERE name=?', ('name1',))

        self.assert_fsck(self.fsck.check_names_refcount)

    def test_orphaned_name(self):

        self._add_name('zupbrazl')
        self.assert_fsck(self.fsck.check_names_refcount)

    def test_contents_inode(self):

        self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
                        (self._add_name('foobar'), 124, ROOT_INODE))

        self.assert_fsck(self.fsck.check_contents_inode)

    def test_contents_inode_p(self):

        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                              "VALUES (?,?,?,?,?,?,?,?)",
                              (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR,
                               0, 0, time.time(), time.time(), time.time(), 1, 0))
        self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
                        (self._add_name('foobar'), inode, 123))

        self.assert_fsck(self.fsck.check_contents_parent_inode)

    def test_contents_name(self):

        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                              "VALUES (?,?,?,?,?,?,?,?)",
                              (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR,
                               0, 0, time.time(), time.time(), time.time(), 1, 0))
        self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
                        (42, inode, ROOT_INODE))

        self.assert_fsck(self.fsck.check_contents_name)

    def _add_name(self, name):
        '''Get id for *name* and increase refcount
        
        Name is inserted in table if it does not yet exist.
        '''

        try:
            name_id = self.db.get_val('SELECT id FROM names WHERE name=?', (name,))
        except NoSuchRowError:
            name_id = self.db.rowid('INSERT INTO names (name, refcount) VALUES(?,?)',
                                    (name, 1))
        else:
            self.db.execute('UPDATE names SET refcount=refcount+1 WHERE id=?', (name_id,))
        return name_id

    def _link(self, name, inode, parent_inode=ROOT_INODE):
        '''Link /*name* to *inode*'''

        self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
                        (self._add_name(name), inode, parent_inode))

    def test_inodes_size(self):

        id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                            "VALUES (?,?,?,?,?,?,?,?)",
                            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                             0, 0, time.time(), time.time(), time.time(), 1, 128))
        self._link('test-entry', id_)

        obj_id = self.db.rowid('INSERT INTO objects (refcount,size) VALUES(?,?)', (1, 36))
        block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                                 (1, obj_id, 512))
        self.backend['s3ql_data_%d' % obj_id] = 'foo'

        # Case 1
        self.db.execute('UPDATE inodes SET size=? WHERE id=?', (self.max_obj_size + 120, id_))
        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
                        (id_, 1, block_id))
        self.assert_fsck(self.fsck.check_inodes_size)

        # Case 2
        self.db.execute('DELETE FROM inode_blocks WHERE inode=?', (id_,))
        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
                        (id_, 0, block_id))
        self.db.execute('UPDATE inodes SET size=? WHERE id=?', (129, id_))
        self.assert_fsck(self.fsck.check_inodes_size)

        # Case 3
        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
                        (id_, 1, block_id))
        self.db.execute('UPDATE inodes SET size=? WHERE id=?',
                        (self.max_obj_size + 120, id_))
        self.db.execute('UPDATE blocks SET refcount = refcount + 1 WHERE id = ?',
                        (block_id,))
        self.assert_fsck(self.fsck.check_inodes_size)


    def test_objects_id(self):
        # Create an object that only exists in the backend
        self.backend['s3ql_data_4364'] = 'Testdata'
        self.assert_fsck(self.fsck.check_objects_id)

        # Create an object that does not exist in the backend
        self.db.execute('INSERT INTO objects (id, refcount, size) VALUES(?, ?, ?)', (34, 1, 27))
        self.assert_fsck(self.fsck.check_objects_id)

    def test_blocks_obj_id(self):

        block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                                 (1, 48, 128))

        id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                            "VALUES (?,?,?,?,?,?,?,?)",
                            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                             0, 0, time.time(), time.time(), time.time(), 1, 128))
        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                        (id_, 0, block_id))

        self._link('test-entry', id_)
        self.assert_fsck(self.fsck.check_blocks_obj_id)

    def test_missing_obj(self):

        obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 32)')
        block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                                 (1, obj_id, 128))

        id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                            "VALUES (?,?,?,?,?,?,?,?)",
                            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                             0, 0, time.time(), time.time(), time.time(), 1, 128))
        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                        (id_, 0, block_id))

        self._link('test-entry', id_)
        self.assert_fsck(self.fsck.check_objects_id)


    def test_inode_blocks_inode(self):

        obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 42)')
        self.backend['s3ql_data_%d' % obj_id] = 'foo'

        block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                                 (1, obj_id, 34))

        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                        (27, 0, block_id))

        self.assert_fsck(self.fsck.check_inode_blocks_inode)

    def test_inode_blocks_block_id(self):

        id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                            "VALUES (?,?,?,?,?,?,?,?)",
                            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                             0, 0, time.time(), time.time(), time.time(), 1, 128))
        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                        (id_, 0, 35))

        self._link('test-entry', id_)
        self.assert_fsck(self.fsck.check_inode_blocks_block_id)

    def test_symlinks_inode(self):

        self.db.execute('INSERT INTO symlink_targets (inode, target) VALUES(?,?)',
                        (42, b'somewhere else'))

        self.assert_fsck(self.fsck.check_symlinks_inode)

    def test_ext_attrs_inode(self):

        self.db.execute('INSERT INTO ext_attributes (name_id, inode, value) VALUES(?,?,?)',
                        (self._add_name('some name'), 34, b'some value'))

        self.assert_fsck(self.fsck.check_ext_attributes_inode)

    def test_ext_attrs_name(self):

        id_ = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                            "VALUES (?,?,?,?,?,?,?,?)",
                            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                             0, 0, time.time(), time.time(), time.time(), 1, 128))
        self._link('test-entry', id_)

        self.db.execute('INSERT INTO ext_attributes (name_id, inode, value) VALUES(?,?,?)',
                        (34, id_, b'some value'))

        self.assert_fsck(self.fsck.check_ext_attributes_name)

    @staticmethod
    def random_data(len_):
        with open("/dev/urandom", "rb") as fd:
            return fd.read(len_)

    def test_loops(self):

        # Create some directory inodes  
        inodes = [ self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
                                 "VALUES (?,?,?,?,?,?,?)",
                                 (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR,
                                  0, 0, time.time(), time.time(), time.time(), 1))
                   for dummy in range(3) ]

        inodes.append(inodes[0])
        last = inodes[0]
        for inode in inodes[1:]:
            self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?, ?, ?)',
                            (self._add_name(bytes(inode)), inode, last))
            last = inode

        self.assert_fsck(self.fsck.check_loops)

    def test_obj_refcounts(self):

        obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 42)')
        block_id_1 = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                                 (1, obj_id, 0))
        block_id_2 = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                                   (1, obj_id, 0))
        self.backend['s3ql_data_%d' % obj_id] = 'foo'

        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                              "VALUES (?,?,?,?,?,?,?,?)",
                              (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                               os.getuid(), os.getgid(), time.time(), time.time(), time.time(),
                               1, 2048))
        self._link('test-entry', inode)
        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                        (inode, 1, block_id_1))
        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                        (inode, 2, block_id_2))

        self.assert_fsck(self.fsck.check_objects_refcount)

    def test_orphaned_obj(self):

        self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 33)')
        self.assert_fsck(self.fsck.check_objects_refcount)

    def test_wrong_block_refcount(self):

        obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 23)')
        self.backend['s3ql_data_%d' % obj_id] = 'foo'
        block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                                 (1, obj_id, 0))

        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                              "VALUES (?,?,?,?,?,?,?,?)",
                              (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, os.getuid(), os.getgid(),
                               time.time(), time.time(), time.time(), 1, self.max_obj_size))
        self._link('test-entry', inode)

        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                        (inode, 0, block_id))
        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                        (inode, 1, block_id))

        self.assert_fsck(self.fsck.check_blocks_refcount)

    def test_orphaned_block(self):

        obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 24)')
        self.backend['s3ql_data_%d' % obj_id] = 'foo'
        self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                      (1, obj_id, 3))
        self.assert_fsck(self.fsck.check_blocks_refcount)

    def test_unix_size(self):

        inode = 42
        self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount,size) "
                        "VALUES (?,?,?,?,?,?,?,?,?)",
                        (inode, stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR,
                         os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1, 0))
        self._link('test-entry', inode)

        self.fsck.found_errors = False
        self.fsck.check_unix()
        self.assertFalse(self.fsck.found_errors)

        self.db.execute('UPDATE inodes SET size = 1 WHERE id=?', (inode,))
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)


    def test_unix_size_symlink(self):

        inode = 42
        target = 'some funny random string'
        self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount,size) "
                        "VALUES (?,?,?,?,?,?,?,?,?)",
                        (inode, stat.S_IFLNK | stat.S_IRUSR | stat.S_IWUSR,
                         os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1,
                         len(target)))
        self.db.execute('INSERT INTO symlink_targets (inode, target) VALUES(?,?)', (inode, target))
        self._link('test-entry', inode)

        self.fsck.found_errors = False
        self.fsck.check_unix()
        self.assertFalse(self.fsck.found_errors)

        self.db.execute('UPDATE inodes SET size = 0 WHERE id=?', (inode,))
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_target(self):

        inode = 42
        self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount) "
                     "VALUES (?,?,?,?,?,?,?,?)",
                     (inode, stat.S_IFCHR | stat.S_IRUSR | stat.S_IWUSR,
                      os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
        self._link('test-entry', inode)

        self.fsck.found_errors = False
        self.fsck.check_unix()
        self.assertFalse(self.fsck.found_errors)

        self.db.execute('INSERT INTO symlink_targets (inode, target) VALUES(?,?)', (inode, 'foo'))
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_nomode_reg(self):

        perms = stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH | stat.S_IRGRP
        stamp = time.time()
        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
                              "VALUES (?,?,?,?,?,?,?)",
                              (perms, os.getuid(), os.getgid(), stamp, stamp, stamp, 1))
        self._link('test-entry', inode)

        self.assert_fsck(self.fsck.check_unix)
        
        newmode = self.db.get_val('SELECT mode FROM inodes WHERE id=?', (inode,))
        self.assertEqual(stat.S_IMODE(newmode), perms)
        self.assertEqual(stat.S_IFMT(newmode), stat.S_IFREG)

    def test_unix_nomode_dir(self):

        perms = stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH | stat.S_IRGRP
        stamp = time.time()
        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
                              "VALUES (?,?,?,?,?,?,?)",
                              (perms, os.getuid(), os.getgid(), stamp, stamp, stamp, 1))
        inode2 = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
                              "VALUES (?,?,?,?,?,?,?)",
                              (perms | stat.S_IFREG, os.getuid(), os.getgid(), stamp, 
                               stamp, stamp, 1))

        self._link('test-entry', inode)
        self._link('subentry', inode2, inode)

        self.assert_fsck(self.fsck.check_unix)
        
        newmode = self.db.get_val('SELECT mode FROM inodes WHERE id=?', (inode,))
        self.assertEqual(stat.S_IMODE(newmode), perms)
        self.assertEqual(stat.S_IFMT(newmode), stat.S_IFDIR)

    def test_unix_symlink_no_target(self):

        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
                              "VALUES (?,?,?,?,?,?,?)",
                              (stat.S_IFLNK | stat.S_IRUSR | stat.S_IWUSR,
                               os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
        self._link('test-entry', inode)
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_rdev(self):

        inode = 42
        self.db.execute("INSERT INTO inodes (id, mode,uid,gid,mtime,atime,ctime,refcount) "
                     "VALUES (?,?,?,?,?,?,?,?)",
                     (inode, stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR,
                      os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
        self._link('test-entry', inode)

        self.fsck.found_errors = False
        self.fsck.check_unix()
        self.assertFalse(self.fsck.found_errors)

        self.db.execute('UPDATE inodes SET rdev=? WHERE id=?', (42, inode))
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_child(self):

        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
                   "VALUES (?,?,?,?,?,?,?)",
                   (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                    os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
        self._link('test-entry', inode)

        self.fsck.found_errors = False
        self.fsck.check_unix()
        self.assertFalse(self.fsck.found_errors)
        self.db.execute('INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
                        (self._add_name('foo'), ROOT_INODE, inode))
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_blocks(self):

        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
                              "VALUES (?,?,?,?,?,?,?)",
                              (stat.S_IFSOCK | stat.S_IRUSR | stat.S_IWUSR,
                               os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))
        self._link('test-entry', inode)

        self.fsck.found_errors = False
        self.fsck.check_unix()
        self.assertFalse(self.fsck.found_errors)

        obj_id = self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 32)')
        block_id = self.db.rowid('INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
                                 (1, obj_id, 0))

        self.db.execute('INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
                        (inode, 1, block_id))

        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)
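
All of these fixtures run against s3ql's metadata schema. The reduced sketch below is inferred from the INSERT statements in this revision's tests (the real create_tables() defines more columns, indices, and constraints, and later revisions rename the timestamp columns to mtime_ns/atime_ns/ctime_ns); it is just enough to run the statements above against a scratch database:

import sqlite3

SCHEMA = '''
CREATE TABLE names (id INTEGER PRIMARY KEY, name BLOB, refcount INT);
CREATE TABLE inodes (id INTEGER PRIMARY KEY, mode INT, uid INT, gid INT,
                     mtime REAL, atime REAL, ctime REAL,
                     refcount INT, size INT DEFAULT 0, rdev INT);
CREATE TABLE contents (name_id INT, inode INT, parent_inode INT);
CREATE TABLE objects (id INTEGER PRIMARY KEY, refcount INT, size INT);
CREATE TABLE blocks (id INTEGER PRIMARY KEY, refcount INT, obj_id INT,
                     size INT, hash BLOB);
CREATE TABLE inode_blocks (inode INT, blockno INT, block_id INT);
CREATE TABLE symlink_targets (inode INT, target BLOB);
CREATE TABLE ext_attributes (name_id INT, inode INT, value BLOB);
CREATE VIEW contents_v AS
    SELECT names.name AS name, contents.inode AS inode,
           contents.parent_inode AS parent_inode
    FROM contents JOIN names ON names.id = contents.name_id;
'''

def make_scratch_db():
    db = sqlite3.connect(':memory:')
    db.executescript(SCHEMA)
    return db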
Example #36
class fsck_tests(unittest.TestCase):
    def setUp(self):
        self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        self.backend = local.Backend(
            Namespace(storage_url='local://' + self.backend_dir))
        self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.max_obj_size = 1024

        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        self.fsck = Fsck(self.cachedir, self.backend,
                         {'max_obj_size': self.max_obj_size}, self.db)
        self.fsck.expect_errors = True

    def tearDown(self):
        shutil.rmtree(self.cachedir)
        shutil.rmtree(self.backend_dir)
        self.dbfile.close()

    def assert_fsck(self, fn):
        '''Check that fn detects and corrects an error'''

        self.fsck.found_errors = False
        fn()
        self.assertTrue(self.fsck.found_errors)
        self.fsck.found_errors = False
        self.fsck.check()
        self.assertFalse(self.fsck.found_errors)

    def test_cache(self):
        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
             | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
             os.getuid(), os.getgid(), time_ns(), time_ns(), time_ns(), 1, 8))
        self._link(b'test-entry', inode)

        # Create new block
        fh = open(self.cachedir + '/%d-0' % inode, 'wb')
        fh.write(b'somedata')
        fh.close()
        self.assert_fsck(self.fsck.check_cache)
        self.assertEqual(self.backend['s3ql_data_1'], b'somedata')

        # Existing block
        self.db.execute('UPDATE inodes SET size=? WHERE id=?',
                        (self.max_obj_size + 8, inode))
        with open(self.cachedir + '/%d-1' % inode, 'wb') as fh:
            fh.write(b'somedata')
        self.assert_fsck(self.fsck.check_cache)

        # Old block preserved
        with open(self.cachedir + '/%d-0' % inode, 'wb') as fh:
            fh.write(b'somedat2')
        self.assert_fsck(self.fsck.check_cache)

        # Old block removed
        with open(self.cachedir + '/%d-1' % inode, 'wb') as fh:
            fh.write(b'somedat3')
        self.assert_fsck(self.fsck.check_cache)

    def test_lof1(self):

        # Make lost+found a file
        inode = self.db.get_val(
            "SELECT inode FROM contents_v WHERE name=? AND parent_inode=?",
            (b"lost+found", ROOT_INODE))
        self.db.execute('DELETE FROM contents WHERE parent_inode=?', (inode, ))
        self.db.execute('UPDATE inodes SET mode=?, size=? WHERE id=?',
                        (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, inode))

        def check():
            self.fsck.check_lof()
            self.fsck.check_inodes_refcount()

        self.assert_fsck(check)

    def test_lof2(self):
        # Remove lost+found
        name_id = self.db.get_val('SELECT id FROM names WHERE name=?',
                                  (b'lost+found', ))
        inode = self.db.get_val(
            'SELECT inode FROM contents WHERE name_id=? AND '
            'parent_inode=?', (name_id, ROOT_INODE))
        self.db.execute('DELETE FROM inodes WHERE id=?', (inode, ))
        self.db.execute(
            'DELETE FROM contents WHERE name_id=? and parent_inode=?',
            (name_id, ROOT_INODE))
        self.db.execute('UPDATE names SET refcount = refcount-1 WHERE id=?',
                        (name_id, ))

        self.assert_fsck(self.fsck.check_lof)

    def test_wrong_inode_refcount(self):

        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0, time_ns(),
             time_ns(), time_ns(), 1, 0))
        self._link(b'name1', inode)
        self._link(b'name2', inode)
        self.assert_fsck(self.fsck.check_inodes_refcount)

    def test_orphaned_inode(self):

        self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0, time_ns(),
             time_ns(), time_ns(), 1, 0))
        self.assert_fsck(self.fsck.check_inodes_refcount)

    def test_name_refcount(self):

        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0, time_ns(),
             time_ns(), time_ns(), 2, 0))
        self._link(b'name1', inode)
        self._link(b'name2', inode)

        self.db.execute('UPDATE names SET refcount=refcount+1 WHERE name=?',
                        (b'name1', ))

        self.assert_fsck(self.fsck.check_names_refcount)

    def test_orphaned_name(self):

        self._add_name(b'zupbrazl')
        self.assert_fsck(self.fsck.check_names_refcount)

    def test_contents_inode(self):

        self.db.execute(
            'INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
            (self._add_name(b'foobar'), 124, ROOT_INODE))

        self.assert_fsck(self.fsck.check_contents_inode)

    def test_contents_inode_p(self):

        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR, 0, 0, time_ns(),
             time_ns(), time_ns(), 1, 0))
        self.db.execute(
            'INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
            (self._add_name(b'foobar'), inode, 123))

        self.assert_fsck(self.fsck.check_contents_parent_inode)

    def test_contents_name(self):

        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR, 0, 0, time_ns(),
             time_ns(), time_ns(), 1, 0))
        self.db.execute(
            'INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
            (42, inode, ROOT_INODE))

        self.assert_fsck(self.fsck.check_contents_name)

    def _add_name(self, name):
        '''Get id for *name* and increase refcount

        Name is inserted in table if it does not yet exist.
        '''

        try:
            name_id = self.db.get_val('SELECT id FROM names WHERE name=?',
                                      (name, ))
        except NoSuchRowError:
            name_id = self.db.rowid(
                'INSERT INTO names (name, refcount) VALUES(?,?)', (name, 1))
        else:
            self.db.execute('UPDATE names SET refcount=refcount+1 WHERE id=?',
                            (name_id, ))
        return name_id

    def _link(self, name, inode, parent_inode=ROOT_INODE):
        '''Link /*name* to *inode*'''

        self.db.execute(
            'INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
            (self._add_name(name), inode, parent_inode))

    def test_inodes_size(self):

        id_ = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0, time_ns(),
             time_ns(), time_ns(), 1, 128))
        self._link(b'test-entry', id_)

        obj_id = self.db.rowid(
            'INSERT INTO objects (refcount,size) VALUES(?,?)', (1, 36))
        block_id = self.db.rowid(
            'INSERT INTO blocks (refcount, obj_id, size, hash) '
            'VALUES(?,?,?,?)', (1, obj_id, 512, sha256(b'foo')))
        self.backend['s3ql_data_%d' % obj_id] = b'foo'

        # Case 1
        self.db.execute('UPDATE inodes SET size=? WHERE id=?',
                        (self.max_obj_size + 120, id_))
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
            (id_, 1, block_id))
        self.assert_fsck(self.fsck.check_inodes_size)

        # Case 2
        self.db.execute('DELETE FROM inode_blocks WHERE inode=?', (id_, ))
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
            (id_, 0, block_id))
        self.db.execute('UPDATE inodes SET size=? WHERE id=?', (129, id_))
        self.assert_fsck(self.fsck.check_inodes_size)

        # Case 3
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
            (id_, 1, block_id))
        self.db.execute('UPDATE inodes SET size=? WHERE id=?',
                        (self.max_obj_size + 120, id_))
        self.db.execute(
            'UPDATE blocks SET refcount = refcount + 1 WHERE id = ?',
            (block_id, ))
        self.assert_fsck(self.fsck.check_inodes_size)

    def test_objects_id(self):
        # Create an object that only exists in the backend
        self.backend['s3ql_data_4364'] = b'Testdata'
        self.assert_fsck(self.fsck.check_objects_id)

        # Create an object that does not exist in the backend
        self.db.execute(
            'INSERT INTO objects (id, refcount, size) VALUES(?, ?, ?)',
            (34, 1, 27))
        self.assert_fsck(self.fsck.check_objects_id)

    def test_blocks_checksum(self):
        id_ = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0, time_ns(),
             time_ns(), time_ns(), 1, 8))
        self._link(b'test-entry', id_)

        # Assume that due to a crash we did not write the hash for the block
        self.backend['s3ql_data_4364'] = b'Testdata'
        self.db.execute(
            'INSERT INTO objects (id, refcount, size) VALUES(?, ?, ?)',
            (4364, 1, 8))
        block_id = self.db.rowid(
            'INSERT INTO blocks (obj_id, refcount, size) VALUES(?, ?, ?)',
            (4364, 1, 8))
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?, ?, ?)',
            (id_, 0, block_id))

        # check_blocks_checksum should detect the missing hash and drop the block
        self.fsck.found_errors = False
        self.fsck.check_blocks_checksum()
        assert self.fsck.found_errors
        self.fsck.found_errors = False
        self.fsck.check_blocks_checksum()
        assert not self.fsck.found_errors

        # Should save files in lost+found
        self.fsck.found_errors = False
        self.fsck.check()
        assert self.fsck.found_errors

        # Now everything should be good
        self.fsck.found_errors = False
        self.fsck.check()
        assert not self.fsck.found_errors

        assert not self.db.has_val(
            'SELECT block_id FROM inode_blocks WHERE inode=?', (id_, ))
        inode_p = self.db.get_val(
            'SELECT parent_inode FROM contents_v WHERE inode=?', (id_, ))
        lof_id = self.db.get_val(
            "SELECT inode FROM contents_v WHERE name=? AND parent_inode=?",
            (b"lost+found", ROOT_INODE))
        assert inode_p == lof_id

    def test_blocks_obj_id(self):

        block_id = self.db.rowid(
            'INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
            (1, 48, 128))

        id_ = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0, time_ns(),
             time_ns(), time_ns(), 1, 128))
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
            (id_, 0, block_id))

        self._link(b'test-entry', id_)
        self.assert_fsck(self.fsck.check_blocks_obj_id)

    def test_missing_obj(self):

        obj_id = self.db.rowid(
            'INSERT INTO objects (refcount, size) VALUES(1, 32)')
        block_id = self.db.rowid(
            'INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
            (1, obj_id, 128))

        id_ = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0, time_ns(),
             time_ns(), time_ns(), 1, 128))
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
            (id_, 0, block_id))

        self._link(b'test-entry', id_)
        self.assert_fsck(self.fsck.check_objects_id)

    def test_inode_blocks_inode(self):

        obj_id = self.db.rowid(
            'INSERT INTO objects (refcount, size) VALUES(1, 42)')
        self.backend['s3ql_data_%d' % obj_id] = b'foo'

        block_id = self.db.rowid(
            'INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
            (1, obj_id, 34))

        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
            (27, 0, block_id))
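        # (inode 27 does not exist, so this mapping is dangling)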

        self.assert_fsck(self.fsck.check_inode_blocks_inode)

    def test_inode_blocks_block_id(self):

        id_ = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0, time_ns(),
             time_ns(), time_ns(), 1, 128))
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
            (id_, 0, 35))

        self._link(b'test-entry', id_)
        self.assert_fsck(self.fsck.check_inode_blocks_block_id)

    def test_symlinks_inode(self):

        self.db.execute(
            'INSERT INTO symlink_targets (inode, target) VALUES(?,?)',
            (42, b'somewhere else'))

        self.assert_fsck(self.fsck.check_symlinks_inode)

    def test_ext_attrs_inode(self):

        self.db.execute(
            'INSERT INTO ext_attributes (name_id, inode, value) VALUES(?,?,?)',
            (self._add_name(b'some name'), 34, b'some value'))

        self.assert_fsck(self.fsck.check_ext_attributes_inode)

    def test_ext_attrs_name(self):

        id_ = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, 0, time_ns(),
             time_ns(), time_ns(), 1, 128))
        self._link(b'test-entry', id_)

        self.db.execute(
            'INSERT INTO ext_attributes (name_id, inode, value) VALUES(?,?,?)',
            (34, id_, b'some value'))

        self.assert_fsck(self.fsck.check_ext_attributes_name)

    @staticmethod
    def random_data(len_):
        with open("/dev/urandom", "rb") as fd:
            return fd.read(len_)

    def test_loops(self):

        # Create some directory inodes
        inodes = [
            self.db.rowid(
                "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) "
                "VALUES (?,?,?,?,?,?,?)",
                (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR, 0, 0, time_ns(),
                 time_ns(), time_ns(), 1)) for dummy in range(3)
        ]

        # Appending the first inode again closes the chain into a directory
        # loop that is not reachable from the root inode
        inodes.append(inodes[0])
        last = inodes[0]
        for inode in inodes[1:]:
            self.db.execute(
                'INSERT INTO contents (name_id, inode, parent_inode) VALUES(?, ?, ?)',
                (self._add_name(str(inode).encode()), inode, last))
            last = inode

        self.assert_fsck(self.fsck.check_loops)

    def test_tmpfile(self):
        # Ensure that path exists
        objname = 's3ql_data_38375'
        self.backend[objname] = b'bla'
        del self.backend[objname]
        path = self.backend._key_to_path(objname)
        tmpname = '%s#%d-%d.tmp' % (path, os.getpid(), _thread.get_ident())
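        # Simulate a temporary upload file left behind by a crash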
        with open(tmpname, 'wb') as fh:
            fh.write(b'Hello, world')

        self.assert_fsck(self.fsck.check_objects_temp)

    def test_obj_refcounts(self):

        obj_id = self.db.rowid(
            'INSERT INTO objects (refcount, size) VALUES(1, 42)')
        block_id_1 = self.db.rowid(
            'INSERT INTO blocks (refcount, obj_id, size, hash) '
            'VALUES(?,?,?,?)', (1, obj_id, 0, sha256(b'foo')))
        block_id_2 = self.db.rowid(
            'INSERT INTO blocks (refcount, obj_id, size, hash) '
            'VALUES(?,?,?,?)', (1, obj_id, 0, sha256(b'bar')))
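        # Two blocks now reference the object while its refcount is still 1;
        # check_objects_refcount should correct it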
        self.backend['s3ql_data_%d' % obj_id] = b'foo and bar'

        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, os.getuid(),
             os.getgid(), time_ns(), time_ns(), time_ns(), 1, 2048))
        self._link(b'test-entry', inode)
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
            (inode, 1, block_id_1))
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
            (inode, 2, block_id_2))

        self.assert_fsck(self.fsck.check_objects_refcount)

    def test_orphaned_obj(self):

        self.db.rowid('INSERT INTO objects (refcount, size) VALUES(1, 33)')
        self.assert_fsck(self.fsck.check_objects_refcount)

    def test_wrong_block_refcount(self):

        obj_id = self.db.rowid(
            'INSERT INTO objects (refcount, size) VALUES(1, 23)')
        self.backend['s3ql_data_%d' % obj_id] = b'foo'
        block_id = self.db.rowid(
            'INSERT INTO blocks (refcount, obj_id, size, hash) '
            'VALUES(?,?,?,?)', (1, obj_id, 0, sha256(b'')))

        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, os.getuid(),
             os.getgid(), time_ns(), time_ns(), time_ns(), 1,
             self.max_obj_size))
        self._link(b'test-entry', inode)

        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
            (inode, 0, block_id))
        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
            (inode, 1, block_id))
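        # The same block is mapped twice while its refcount is still 1;
        # check_blocks_refcount should correct it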

        self.assert_fsck(self.fsck.check_blocks_refcount)

    def test_orphaned_block(self):

        obj_id = self.db.rowid(
            'INSERT INTO objects (refcount, size) VALUES(1, 24)')
        self.backend['s3ql_data_%d' % obj_id] = b'foo'
        self.db.rowid(
            'INSERT INTO blocks (refcount, obj_id, size, hash) VALUES(?,?,?,?)',
            (1, obj_id, 3, sha256(b'xyz')))
        self.assert_fsck(self.fsck.check_blocks_refcount)

    def test_unix_size(self):

        inode = 42
        self.db.execute(
            "INSERT INTO inodes (id, mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?,?)",
            (inode, stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR, os.getuid(),
             os.getgid(), time_ns(), time_ns(), time_ns(), 1, 0))
        self._link(b'test-entry', inode)

        self.fsck.found_errors = False
        self.fsck.check_unix()
        self.assertFalse(self.fsck.found_errors)

        self.db.execute('UPDATE inodes SET size = 1 WHERE id=?', (inode, ))
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_size_symlink(self):

        inode = 42
        target = b'some funny random string'
        self.db.execute(
            "INSERT INTO inodes (id, mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?,?)",
            (inode, stat.S_IFLNK | stat.S_IRUSR | stat.S_IWUSR, os.getuid(),
             os.getgid(), time_ns(), time_ns(), time_ns(), 1, len(target)))
        self.db.execute(
            'INSERT INTO symlink_targets (inode, target) VALUES(?,?)',
            (inode, target))
        self._link(b'test-entry', inode)

        self.fsck.found_errors = False
        self.fsck.check_unix()
        self.assertFalse(self.fsck.found_errors)

        self.db.execute('UPDATE inodes SET size = 0 WHERE id=?', (inode, ))
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_target(self):

        inode = 42
        self.db.execute(
            "INSERT INTO inodes (id, mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (inode, stat.S_IFCHR | stat.S_IRUSR | stat.S_IWUSR, os.getuid(),
             os.getgid(), time_ns(), time_ns(), time_ns(), 1))
        self._link(b'test-entry', inode)

        self.fsck.found_errors = False
        self.fsck.check_unix()
        self.assertFalse(self.fsck.found_errors)

        self.db.execute(
            'INSERT INTO symlink_targets (inode, target) VALUES(?,?)',
            (inode, 'foo'))
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_nomode_reg(self):

        perms = stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH | stat.S_IRGRP
        stamp = time_ns()
        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) "
            "VALUES (?,?,?,?,?,?,?)",
            (perms, os.getuid(), os.getgid(), stamp, stamp, stamp, 1))
        self._link(b'test-entry', inode)

        # The mode has permission bits but no file-type bits; check_unix
        # should infer S_IFREG for this childless entry
        self.assert_fsck(self.fsck.check_unix)

        newmode = self.db.get_val('SELECT mode FROM inodes WHERE id=?',
                                  (inode, ))
        self.assertEqual(stat.S_IMODE(newmode), perms)
        self.assertEqual(stat.S_IFMT(newmode), stat.S_IFREG)

    def test_unix_nomode_dir(self):

        perms = stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH | stat.S_IRGRP
        stamp = time_ns()
        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) "
            "VALUES (?,?,?,?,?,?,?)",
            (perms, os.getuid(), os.getgid(), stamp, stamp, stamp, 1))
        inode2 = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) "
            "VALUES (?,?,?,?,?,?,?)", (perms | stat.S_IFREG, os.getuid(),
                                       os.getgid(), stamp, stamp, stamp, 1))

        self._link(b'test-entry', inode)
        self._link(b'subentry', inode2, inode)

        # This typeless inode has a child entry, so check_unix should
        # infer S_IFDIR instead
        self.assert_fsck(self.fsck.check_unix)

        newmode = self.db.get_val('SELECT mode FROM inodes WHERE id=?',
                                  (inode, ))
        self.assertEqual(stat.S_IMODE(newmode), perms)
        self.assertEqual(stat.S_IFMT(newmode), stat.S_IFDIR)

    def test_unix_symlink_no_target(self):

        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) "
            "VALUES (?,?,?,?,?,?,?)",
            (stat.S_IFLNK | stat.S_IRUSR | stat.S_IWUSR, os.getuid(),
             os.getgid(), time_ns(), time_ns(), time_ns(), 1))
        self._link(b'test-entry', inode)
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_rdev(self):

        inode = 42
        self.db.execute(
            "INSERT INTO inodes (id, mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) "
            "VALUES (?,?,?,?,?,?,?,?)",
            (inode, stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR, os.getuid(),
             os.getgid(), time_ns(), time_ns(), time_ns(), 1))
        self._link(b'test-entry', inode)

        self.fsck.found_errors = False
        self.fsck.check_unix()
        self.assertFalse(self.fsck.found_errors)

        self.db.execute('UPDATE inodes SET rdev=? WHERE id=?', (42, inode))
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_child(self):

        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) "
            "VALUES (?,?,?,?,?,?,?)",
            (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, os.getuid(),
             os.getgid(), time_ns(), time_ns(), time_ns(), 1))
        self._link(b'test-entry', inode)

        self.fsck.found_errors = False
        self.fsck.check_unix()
        self.assertFalse(self.fsck.found_errors)
        self.db.execute(
            'INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)',
            (self._add_name(b'foo'), ROOT_INODE, inode))
        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)

    def test_unix_blocks(self):

        inode = self.db.rowid(
            "INSERT INTO inodes (mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount) "
            "VALUES (?,?,?,?,?,?,?)",
            (stat.S_IFSOCK | stat.S_IRUSR | stat.S_IWUSR, os.getuid(),
             os.getgid(), time_ns(), time_ns(), time_ns(), 1))
        self._link(b'test-entry', inode)

        self.fsck.found_errors = False
        self.fsck.check_unix()
        self.assertFalse(self.fsck.found_errors)

        obj_id = self.db.rowid(
            'INSERT INTO objects (refcount, size) VALUES(1, 32)')
        block_id = self.db.rowid(
            'INSERT INTO blocks (refcount, obj_id, size) VALUES(?,?,?)',
            (1, obj_id, 0))

        self.db.execute(
            'INSERT INTO inode_blocks (inode, blockno, block_id) VALUES(?,?,?)',
            (inode, 1, block_id))

        self.fsck.check_unix()
        self.assertTrue(self.fsck.found_errors)
Example no. 37
0
class cache_tests(unittest.TestCase):

    def setUp(self):
        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name. 
        # Therefore, we unlink the file manually in tearDown() 
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)
        self.cache = inode_cache.InodeCache(self.db, 0)

    def tearDown(self):
        self.cache.destroy()
        os.unlink(self.dbfile.name)

    def test_create(self):
        attrs = {'mode': 784,
                 'refcount': 3,
                 'uid': 7,
                 'gid': 2,
                 'size': 34674,
                 'rdev': 11,
                 'atime': time.time(),
                 'ctime': time.time(),
                 'mtime': time.time() }

        inode = self.cache.create_inode(**attrs)

        for key in attrs.keys():
            self.assertEqual(attrs[key], getattr(inode, key))

        self.assertTrue(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,)))


    def test_del(self):
        attrs = {'mode': 784,
                'refcount': 3,
                'uid': 7,
                'gid': 2,
                'size': 34674,
                'rdev': 11,
                'atime': time.time(),
                'ctime': time.time(),
                'mtime': time.time() }
        inode = self.cache.create_inode(**attrs)
        del self.cache[inode.id]
        self.assertFalse(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,)))
        self.assertRaises(KeyError, self.cache.__delitem__, inode.id)

    def test_get(self):
        attrs = {'mode': 784,
                'refcount': 3,
                'uid': 7,
                'gid': 2,
                'size': 34674,
                'rdev': 11,
                'atime': time.time(),
                'ctime': time.time(),
                'mtime': time.time() }

        inode = self.cache.create_inode(**attrs)
        for (key, val) in attrs.items():
            self.assertEqual(getattr(inode, key), val)

        # Create another inode
        self.cache.create_inode(**attrs)

        self.db.execute('DELETE FROM inodes WHERE id=?', (inode.id,))
        # Entry should still be in cache
        self.assertEqual(inode, self.cache[inode.id])

        # Creating more inodes than the cache can hold evicts the stale entry
        for _ in range(inode_cache.CACHE_SIZE + 1):
            self.cache.create_inode(**attrs)

        self.assertRaises(KeyError, self.cache.__getitem__, inode.id)
Example no. 38
0
class cache_tests(TestCase):
    def setUp(self):

        self.bucket_dir = tempfile.mkdtemp()
        self.bucket_pool = BucketPool(lambda: local.Bucket(self.bucket_dir, None, None))

        self.cachedir = tempfile.mkdtemp() + "/"
        self.blocksize = 1024

        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Create an inode we can work with
        self.inode = 42
        self.db.execute(
            "INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount,size) " "VALUES (?,?,?,?,?,?,?,?,?)",
            (
                self.inode,
                stat.S_IFREG
                | stat.S_IRUSR
                | stat.S_IWUSR
                | stat.S_IXUSR
                | stat.S_IRGRP
                | stat.S_IXGRP
                | stat.S_IROTH
                | stat.S_IXOTH,
                os.getuid(),
                os.getgid(),
                time.time(),
                time.time(),
                time.time(),
                1,
                32,
            ),
        )

        self.cache = BlockCache(self.bucket_pool, self.db, self.cachedir, self.blocksize * 100)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()

    def tearDown(self):
        self.cache.bucket_pool = self.bucket_pool
        self.cache.destroy()
        if os.path.exists(self.cachedir):
            shutil.rmtree(self.cachedir)
        shutil.rmtree(self.bucket_dir)

        llfuse.lock.release()

    @staticmethod
    def random_data(len_):
        with open("/dev/urandom", "rb") as fh:
            return fh.read(len_)
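
    # Note: TestBucketPool is defined elsewhere in this module. From its use
    # below it presumably wraps the real bucket pool and counts backend
    # operations: no_write / no_del state how many uploads and deletions are
    # expected, and verify() asserts that exactly that many occurred. A
    # minimal sketch under that assumption:
    #
    #   class TestBucketPool:
    #       def __init__(self, pool, no_write=0, no_del=0):
    #           (self.pool, self.no_write, self.no_del) = (pool, no_write, no_del)
    #           self.writes = self.dels = 0
    #       def verify(self):
    #           assert self.writes == self.no_write
    #           assert self.dels == self.no_del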

    def test_get(self):
        inode = self.inode
        blockno = 11
        data = self.random_data(int(0.5 * self.blocksize))

        # Case 1: Object does not exist yet
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            fh.write(data)

        # Case 2: Object is in cache
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            self.assertEqual(data, fh.read(len(data)))

        # Case 3: Object needs to be downloaded
        self.cache.clear()
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            self.assertEqual(data, fh.read(len(data)))

    def test_expire(self):
        inode = self.inode

        # Define the 4 most recently accessed ones
        most_recent = [7, 11, 10, 8]
        for i in most_recent:
            time.sleep(0.2)
            with self.cache.get(inode, i) as fh:
                fh.write("%d" % i)

        # And some others
        for i in range(20):
            if i in most_recent:
                continue
            with self.cache.get(inode, i) as fh:
                fh.write("%d" % i)

        # Upload two of the recently accessed entries so they are already clean
        commit(self.cache, inode, most_recent[-2])
        commit(self.cache, inode, most_recent[-3])
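        # (commit() is presumably a module-level test helper that uploads the
        # given blocks, so that expiring them later needs no backend write)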

        # We want to expire 4 entries, 2 of which are already flushed
        self.cache.max_entries = 16
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_write=2)
        self.cache.expire()
        self.cache.bucket_pool.verify()
        self.assertEqual(len(self.cache.entries), 16)

        for i in range(20):
            if i in most_recent:
                self.assertTrue((inode, i) not in self.cache.entries)
            else:
                self.assertTrue((inode, i) in self.cache.entries)

    def test_upload(self):
        inode = self.inode
        datalen = int(0.1 * self.cache.max_size)
        blockno1 = 21
        blockno2 = 25
        blockno3 = 7

        data1 = self.random_data(datalen)
        data2 = self.random_data(datalen)
        data3 = self.random_data(datalen)
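
        # The six cases below exercise new vs. existing objects and first vs.
        # additional references; TestBucketPool verifies the expected number
        # of backend writes and deletes for each case.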

        # Case 1: create new object
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_write=1)
        with self.cache.get(inode, blockno1) as fh:
            fh.seek(0)
            fh.write(data1)
            el1 = fh
        self.cache.upload(el1)
        self.cache.bucket_pool.verify()

        # Case 2: Link new object
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool)
        with self.cache.get(inode, blockno2) as fh:
            fh.seek(0)
            fh.write(data1)
            el2 = fh
        self.cache.upload(el2)
        self.cache.bucket_pool.verify()

        # Case 3: Upload old object, still has references
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_write=1)
        with self.cache.get(inode, blockno1) as fh:
            fh.seek(0)
            fh.write(data2)
        self.cache.upload(el1)
        self.cache.bucket_pool.verify()

        # Case 4: Upload old object, no references left
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_del=1, no_write=1)
        with self.cache.get(inode, blockno2) as fh:
            fh.seek(0)
            fh.write(data3)
        self.cache.upload(el2)
        self.cache.bucket_pool.verify()

        # Case 5: Link old object, no references left
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_del=1)
        with self.cache.get(inode, blockno2) as fh:
            fh.seek(0)
            fh.write(data2)
        self.cache.upload(el2)
        self.cache.bucket_pool.verify()

        # Case 6: Link old object, still has references
        # (Need to create another object first)
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_write=1)
        with self.cache.get(inode, blockno3) as fh:
            fh.seek(0)
            fh.write(data1)
            el3 = fh
        self.cache.upload(el3)
        self.cache.bucket_pool.verify()

        self.cache.bucket_pool = TestBucketPool(self.bucket_pool)
        with self.cache.get(inode, blockno1) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.upload(el1)
        self.cache.clear()
        self.cache.bucket_pool.verify()

    def test_remove_referenced(self):
        inode = self.inode
        datalen = int(0.1 * self.cache.max_size)
        blockno1 = 21
        blockno2 = 24
        data = self.random_data(datalen)

        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_write=1)
        with self.cache.get(inode, blockno1) as fh:
            fh.seek(0)
            fh.write(data)
        with self.cache.get(inode, blockno2) as fh:
            fh.seek(0)
            fh.write(data)
        self.cache.clear()
        self.cache.bucket_pool.verify()

        self.cache.bucket_pool = TestBucketPool(self.bucket_pool)
        self.cache.remove(inode, blockno1)
        self.cache.bucket_pool.verify()

    def test_remove_cache(self):
        inode = self.inode
        data1 = self.random_data(int(0.4 * self.blocksize))

        # Case 1: Elements only in cache
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.remove(inode, 1)
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            self.assertTrue(fh.read(42) == b"")

    def test_remove_cache_db(self):
        inode = self.inode
        data1 = self.random_data(int(0.4 * self.blocksize))

        # Case 2: Element in cache and db
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_write=1)
        commit(self.cache, inode)
        self.cache.bucket_pool.verify()
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_del=1)
        self.cache.remove(inode, 1)
        self.cache.bucket_pool.verify()

        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            self.assertTrue(fh.read(42) == b"")

    def test_remove_db(self):
        inode = self.inode
        data1 = self.random_data(int(0.4 * self.blocksize))

        # Case 3: Element only in DB
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_write=1)
        self.cache.clear()
        self.cache.bucket_pool.verify()
        self.cache.bucket_pool = TestBucketPool(self.bucket_pool, no_del=1)
        self.cache.remove(inode, 1)
        self.cache.bucket_pool.verify()
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            self.assertTrue(fh.read(42) == b"")
Example no. 39
0
class fs_api_tests(TestCase):

    def setUp(self):
        self.bucket_dir = tempfile.mkdtemp()
        self.bucket_pool = BucketPool(lambda: local.Bucket(self.bucket_dir, None, None))
        self.bucket = self.bucket_pool.pop_conn()
        self.cachedir = tempfile.mkdtemp() + "/"
        self.blocksize = 1024

        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()
        
        self.block_cache = BlockCache(self.bucket_pool, self.db, self.cachedir,
                                      self.blocksize * 5)
        self.server = fs.Operations(self.block_cache, self.db, self.blocksize)
          
        self.server.init()

        # Keep track of unused filenames
        self.name_cnt = 0

    def tearDown(self):
        self.server.destroy()
        self.block_cache.destroy()     

        if os.path.exists(self.cachedir):
            shutil.rmtree(self.cachedir)
        shutil.rmtree(self.bucket_dir)
        llfuse.lock.release()

    @staticmethod
    def random_data(len_):
        with open("/dev/urandom", "rb") as fd:
            return fd.read(len_)

    def fsck(self):
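        # Flush all cached state, then run a full consistency check; the
        # tests call this last to verify they left the metadata consistent.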
        self.block_cache.clear()
        self.server.inodes.flush()
        fsck = Fsck(self.cachedir, self.bucket,
                  { 'blocksize': self.blocksize }, self.db)
        fsck.check()
        self.assertFalse(fsck.found_errors)

    def newname(self):
        self.name_cnt += 1
        return "s3ql_%d" % self.name_cnt

    def test_getattr_root(self):
        self.assertTrue(stat.S_ISDIR(self.server.getattr(ROOT_INODE).mode))
        self.fsck()

    def test_create(self):
        ctx = Ctx()
        mode = self.dir_mode()
        name = self.newname()

        inode_p_old = self.server.getattr(ROOT_INODE).copy()
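        # CLOCK_GRANULARITY is presumably the coarsest timestamp resolution;
        # sleeping that long guarantees the next update yields a strictly
        # larger mtime/ctime.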
        time.sleep(CLOCK_GRANULARITY)
        self.server._create(ROOT_INODE, name, mode, ctx)

        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON name_id = names.id '
                              'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE))

        inode = self.server.getattr(id_)

        self.assertEqual(inode.mode, mode)
        self.assertEqual(inode.uid, ctx.uid)
        self.assertEqual(inode.gid, ctx.gid)
        self.assertEqual(inode.refcount, 1)
        self.assertEqual(inode.size, 0)

        inode_p_new = self.server.getattr(ROOT_INODE)

        self.assertGreater(inode_p_new.mtime, inode_p_old.mtime)
        self.assertGreater(inode_p_new.ctime, inode_p_old.ctime)

        self.fsck()

    def test_extstat(self):
        # Test with zero contents
        self.server.extstat()

        # Test with empty file
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), Ctx())
        self.server.release(fh)
        self.server.extstat()

        # Test with data in file
        fh = self.server.open(inode.id, os.O_RDWR)
        self.server.write(fh, 0, 'foobar')
        self.server.release(fh)

        self.server.extstat()

        self.fsck()

    @staticmethod
    def dir_mode():
        return (randint(0, 0o7777) & ~stat.S_IFDIR) | stat.S_IFDIR

    @staticmethod
    def file_mode():
        return (randint(0, 0o7777) & ~stat.S_IFREG) | stat.S_IFREG

    def test_getxattr(self):
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), Ctx())
        self.server.release(fh)

        self.assertRaises(FUSEError, self.server.getxattr, inode.id, 'nonexistent-attr')

        self.server.setxattr(inode.id, 'my-attr', 'strabumm!')
        self.assertEqual(self.server.getxattr(inode.id, 'my-attr'), 'strabumm!')

        self.fsck()

    def test_link(self):
        name = self.newname()

        inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(),
                                        self.dir_mode(), Ctx())
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()

        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), Ctx())
        self.server.release(fh)
        time.sleep(CLOCK_GRANULARITY)

        inode_before = self.server.getattr(inode.id).copy()
        self.server.link(inode.id, inode_p_new.id, name)

        inode_after = self.server.lookup(inode_p_new.id, name)
        inode_p_new_after = self.server.getattr(inode_p_new.id)

        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                              'WHERE name=? AND parent_inode = ?', (name, inode_p_new.id))

        self.assertEqual(inode_before.id, id_)
        self.assertEqual(inode_after.refcount, 2)
        self.assertGreater(inode_after.ctime, inode_before.ctime)
        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)

        self.fsck()

    def test_listxattr(self):
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), Ctx())
        self.server.release(fh)

        self.assertListEqual([], self.server.listxattr(inode.id))

        self.server.setxattr(inode.id, 'key1', 'blub')
        self.assertListEqual(['key1'], self.server.listxattr(inode.id))

        self.server.setxattr(inode.id, 'key2', 'blub')
        self.assertListEqual(sorted(['key1', 'key2']),
                             sorted(self.server.listxattr(inode.id)))

        self.fsck()

    def test_read(self):

        len_ = self.blocksize
        data = self.random_data(len_)
        off = self.blocksize // 2
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                     self.file_mode(), Ctx())

        self.server.write(fh, off, data)
        inode_before = self.server.getattr(inode.id).copy()
        time.sleep(CLOCK_GRANULARITY)
        self.assertTrue(self.server.read(fh, off, len_) == data)
        inode_after = self.server.getattr(inode.id)
        self.assertGreater(inode_after.atime, inode_before.atime)
        self.assertTrue(self.server.read(fh, 0, len_) == b"\0" * off + data[:off])
        self.assertTrue(self.server.read(fh, self.blocksize, len_) == data[off:])
        self.server.release(fh)

        self.fsck()

    def test_readdir(self):

        # Create a few entries
        names = [ 'entry_%2d' % i for i in range(20) ]
        for name in names:
            (fh, _) = self.server.create(ROOT_INODE, name,
                                         self.file_mode(), Ctx())
            self.server.release(fh)
            
        # Delete some to make sure that we don't have continuous rowids
        remove_no = [0, 2, 3, 5, 9]
        for i in remove_no:
            self.server.unlink(ROOT_INODE, names[i])
            del names[i]

        # Read all
        fh = self.server.opendir(ROOT_INODE)
        self.assertListEqual(sorted(names + ['lost+found']) ,
                             sorted(x[0] for x in self.server.readdir(fh, 0)))
        self.server.releasedir(fh)

        # Read in parts
        fh = self.server.opendir(ROOT_INODE)
        entries = list()
        try:
            next_ = 0
            while True:
                gen = self.server.readdir(fh, next_)
                for _ in range(3):
                    (name, _, next_) = next(gen)
                    entries.append(name)
                    
        except StopIteration:
            pass

        self.assertListEqual(sorted(names + ['lost+found']) ,
                             sorted(entries))
        self.server.releasedir(fh)

        self.fsck()

    def test_release(self):
        name = self.newname()

        # Test that entries are deleted when they're no longer referenced
        (fh, inode) = self.server.create(ROOT_INODE, name,
                                         self.file_mode(), Ctx())
        self.server.write(fh, 0, 'foobar')
        self.server.unlink(ROOT_INODE, name)
        self.assertFalse(self.db.has_val('SELECT 1 FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        self.assertTrue(self.server.getattr(inode.id).id)
        self.server.release(fh)

        self.assertFalse(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,)))

        self.fsck()

    def test_removexattr(self):
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), Ctx())
        self.server.release(fh)

        self.assertRaises(FUSEError, self.server.removexattr, inode.id, 'some name')
        self.server.setxattr(inode.id, 'key1', 'blub')
        self.server.removexattr(inode.id, 'key1')
        self.assertListEqual([], self.server.listxattr(inode.id))

        self.fsck()

    def test_rename(self):
        oldname = self.newname()
        newname = self.newname()

        inode = self.server.mkdir(ROOT_INODE, oldname, self.dir_mode(), Ctx())

        inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), Ctx())
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
        inode_p_old_before = self.server.getattr(ROOT_INODE).copy()
        time.sleep(CLOCK_GRANULARITY)

        self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)

        inode_p_old_after = self.server.getattr(ROOT_INODE)
        inode_p_new_after = self.server.getattr(inode_p_new.id)

        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE)))
        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id == name_id '
                              'WHERE name=? AND parent_inode = ?', (newname, inode_p_new.id))
        self.assertEqual(inode.id, id_)

        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
        self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
        self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)


        self.fsck()

    def test_replace_file(self):
        oldname = self.newname()
        newname = self.newname()

        (fh, inode) = self.server.create(ROOT_INODE, oldname, self.file_mode(), Ctx())
        self.server.write(fh, 0, 'some data to deal with')
        self.server.release(fh)
        self.server.setxattr(inode.id, 'test_xattr', '42*8')

        inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), Ctx())
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
        inode_p_old_before = self.server.getattr(ROOT_INODE).copy()

        (fh, inode2) = self.server.create(inode_p_new.id, newname, self.file_mode(), Ctx())
        self.server.write(fh, 0, 'even more data to deal with')
        self.server.release(fh)
        self.server.setxattr(inode2.id, 'test_xattr', '42*8')

        time.sleep(CLOCK_GRANULARITY)
        self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)

        inode_p_old_after = self.server.getattr(ROOT_INODE)
        inode_p_new_after = self.server.getattr(inode_p_new.id)

        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE)))
        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                              'WHERE name=? AND parent_inode = ?', (newname, inode_p_new.id))
        self.assertEqual(inode.id, id_)

        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
        self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
        self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)

        self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode2.id,)))

        self.fsck()

    def test_replace_dir(self):
        oldname = self.newname()
        newname = self.newname()

        inode = self.server.mkdir(ROOT_INODE, oldname, self.dir_mode(), Ctx())

        inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(), self.dir_mode(), Ctx())
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
        inode_p_old_before = self.server.getattr(ROOT_INODE).copy()

        inode2 = self.server.mkdir(inode_p_new.id, newname, self.dir_mode(), Ctx())

        time.sleep(CLOCK_GRANULARITY)
        self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)

        inode_p_old_after = self.server.getattr(ROOT_INODE)
        inode_p_new_after = self.server.getattr(inode_p_new.id)

        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE)))
        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                              'WHERE name=? AND parent_inode = ?', (newname, inode_p_new.id))
        self.assertEqual(inode.id, id_)

        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
        self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
        self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)

        self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode2.id,)))

        self.fsck()

    def test_setattr(self):
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(), 0o641, Ctx())
        self.server.release(fh)
        inode_old = self.server.getattr(inode.id).copy()

        attr = llfuse.EntryAttributes()
        attr.st_mode = self.file_mode()
        attr.st_uid = randint(0, 2 ** 32)
        attr.st_gid = randint(0, 2 ** 32)
        attr.st_rdev = randint(0, 2 ** 32)
        attr.st_atime = time.timezone + randint(0, 2 ** 32) / 10 ** 6
        attr.st_mtime = time.timezone + randint(0, 2 ** 32) / 10 ** 6

        time.sleep(CLOCK_GRANULARITY)
        self.server.setattr(inode.id, attr)
        inode_new = self.server.getattr(inode.id)
        self.assertGreater(inode_new.ctime, inode_old.ctime)

        for key in attr.__slots__:
            if getattr(attr, key) is not None:
                self.assertEqual(getattr(attr, key),
                                 getattr(inode_new, key))


    def test_truncate(self):
        len_ = int(2.7 * self.blocksize)
        data = self.random_data(len_)
        attr = llfuse.EntryAttributes()

        (fh, inode) = self.server.create(ROOT_INODE, self.newname(), self.file_mode(), Ctx())
        self.server.write(fh, 0, data)
        
        attr.st_size = len_ // 2
        self.server.setattr(inode.id, attr)
        self.assertTrue(self.server.read(fh, 0, len_) == data[:len_ // 2])
        attr.st_size = len_ 
        self.server.setattr(inode.id, attr)
        self.assertTrue(self.server.read(fh, 0, len_)
                        == data[:len_ // 2] + b'\0' * (len_ // 2))
        self.server.release(fh)

        self.fsck()

    def test_truncate_0(self):
        len1 = 158
        len2 = 133
        attr = llfuse.EntryAttributes()
        
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), Ctx())
        self.server.write(fh, 0, self.random_data(len1))
        self.server.release(fh)
        self.server.inodes.flush()
        
        fh = self.server.open(inode.id, os.O_RDWR)     
        attr.st_size = 0
        self.server.setattr(inode.id, attr)
        self.server.write(fh, 0, self.random_data(len2))
        self.server.release(fh)

        self.fsck()
        
    def test_setxattr(self):
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), Ctx())
        self.server.release(fh)

        self.server.setxattr(inode.id, 'my-attr', 'strabumm!')
        self.assertEqual(self.server.getxattr(inode.id, 'my-attr'), 'strabumm!')

        self.fsck()

    def test_statfs(self):
        # Test with zero contents
        self.server.statfs()

        # Test with empty file
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), Ctx())
        self.server.release(fh)
        self.server.statfs()

        # Test with data in file
        fh = self.server.open(inode.id, os.O_RDWR)
        self.server.write(fh, 0, 'foobar')
        self.server.release(fh)

        self.server.statfs()

    def test_symlink(self):
        target = self.newname()
        name = self.newname()

        inode_p_before = self.server.getattr(ROOT_INODE).copy()
        time.sleep(CLOCK_GRANULARITY)
        inode = self.server.symlink(ROOT_INODE, name, target, Ctx())
        inode_p_after = self.server.getattr(ROOT_INODE)

        self.assertEqual(target, self.server.readlink(inode.id))

        id_ = self.db.get_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                              'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE))

        self.assertEqual(inode.id, id_)
        self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
        self.assertLess(inode_p_before.ctime, inode_p_after.ctime)


    def test_unlink(self):
        name = self.newname()

        (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(), Ctx())
        self.server.write(fh, 0, 'some data to deal with')
        self.server.release(fh)

        # Add extended attributes
        self.server.setxattr(inode.id, 'test_xattr', '42*8')

        inode_p_before = self.server.getattr(ROOT_INODE).copy()
        time.sleep(CLOCK_GRANULARITY)
        self.server.unlink(ROOT_INODE, name)
        inode_p_after = self.server.getattr(ROOT_INODE)

        self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
        self.assertLess(inode_p_before.ctime, inode_p_after.ctime)

        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id,)))

        self.fsck()

    def test_rmdir(self):
        name = self.newname()
        inode = self.server.mkdir(ROOT_INODE, name, self.dir_mode(), Ctx())
        inode_p_before = self.server.getattr(ROOT_INODE).copy()
        time.sleep(CLOCK_GRANULARITY)
        self.server.rmdir(ROOT_INODE, name)
        inode_p_after = self.server.getattr(ROOT_INODE)

        self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
        self.assertLess(inode_p_before.ctime, inode_p_after.ctime)
        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id,)))

        self.fsck()

    def test_relink(self):
        name = self.newname()
        name2 = self.newname()
        data = 'some data to deal with'

        (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(), Ctx())
        self.server.write(fh, 0, data)
        self.server.unlink(ROOT_INODE, name)
        self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                         'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        self.assertTrue(self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id,)))

        self.server.link(inode.id, ROOT_INODE, name2)
        self.server.release(fh)

        fh = self.server.open(inode.id, os.O_RDONLY)
        self.assertTrue(self.server.read(fh, 0, len(data)) == data)
        self.server.release(fh)
        self.fsck()

    def test_write(self):
        len_ = self.blocksize
        data = self.random_data(len_)
        off = self.blocksize // 2
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                     self.file_mode(), Ctx())
        inode_before = self.server.getattr(inode.id).copy()
        time.sleep(CLOCK_GRANULARITY)
        self.server.write(fh, off, data)
        inode_after = self.server.getattr(inode.id)

        self.assertGreater(inode_after.mtime, inode_before.mtime)
        self.assertGreater(inode_after.ctime, inode_before.ctime)
        self.assertEqual(inode_after.size, off + len_)

        self.server.write(fh, 0, data)
        inode_after = self.server.getattr(inode.id)
        self.assertEqual(inode_after.size, off + len_)

        self.server.release(fh)

        self.fsck()

    def test_edit(self):
        len_ = self.blocksize
        data = self.random_data(len_)
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                     self.file_mode(), Ctx())
        self.server.write(fh, 0, data)
        self.server.release(fh)
        
        self.block_cache.clear()
        
        fh = self.server.open(inode.id, os.O_RDWR)
        attr = llfuse.EntryAttributes()
        attr.st_size = 0
        self.server.setattr(inode.id, attr)
        self.server.write(fh, 0, data[50:])
        self.server.release(fh)
        
        self.fsck()
        
    def test_copy_tree(self):

        src_inode = self.server.mkdir(ROOT_INODE, 'source', self.dir_mode(), Ctx())
        dst_inode = self.server.mkdir(ROOT_INODE, 'dest', self.dir_mode(), Ctx())

        # Create file
        (fh, f1_inode) = self.server.create(src_inode.id, 'file1',
                                            self.file_mode(), Ctx())
        self.server.write(fh, 0, 'file1 contents')
        self.server.release(fh)

        # Create hardlink
        (fh, f2_inode) = self.server.create(src_inode.id, 'file2',
                                            self.file_mode(), Ctx())
        self.server.write(fh, 0, 'file2 contents')
        self.server.release(fh)
        f2_inode = self.server.link(f2_inode.id, src_inode.id, 'file2_hardlink')

        # Create subdirectory
        d1_inode = self.server.mkdir(src_inode.id, 'dir1', self.dir_mode(), Ctx())
        d2_inode = self.server.mkdir(d1_inode.id, 'dir2', self.dir_mode(), Ctx())

        # ..with a 3rd hardlink
        f2_inode = self.server.link(f2_inode.id, d1_inode.id, 'file2_hardlink')

        # Replicate the tree; the copy must afterwards be independent of the source
        self.server.copy_tree(src_inode.id, dst_inode.id)

        # Change files
        fh = self.server.open(f1_inode.id, os.O_RDWR)
        self.server.write(fh, 0, 'new file1 contents')
        self.server.release(fh)

        fh = self.server.open(f2_inode.id, os.O_RDWR)
        self.server.write(fh, 0, 'new file2 contents')
        self.server.release(fh)

        # Get copy properties
        f1_inode_c = self.server.lookup(dst_inode.id, 'file1')
        f2_inode_c = self.server.lookup(dst_inode.id, 'file2')
        f2h_inode_c = self.server.lookup(dst_inode.id, 'file2_hardlink')
        d1_inode_c = self.server.lookup(dst_inode.id, 'dir1')
        d2_inode_c = self.server.lookup(d1_inode_c.id, 'dir2')
        f2_h_inode_c = self.server.lookup(d1_inode_c.id, 'file2_hardlink')
        
        # Check file1
        fh = self.server.open(f1_inode_c.id, os.O_RDWR)
        self.assertEqual(self.server.read(fh, 0, 42), 'file1 contents')
        self.server.release(fh)
        self.assertNotEqual(f1_inode.id, f1_inode_c.id)

        # Check file2
        fh = self.server.open(f2_inode_c.id, os.O_RDWR)
        self.assertTrue(self.server.read(fh, 0, 42) == 'file2 contents')
        self.server.release(fh)
        self.assertEqual(f2_inode_c.id, f2h_inode_c.id)
        self.assertEqual(f2_inode_c.refcount, 3)
        self.assertNotEqual(f2_inode.id, f2_inode_c.id)
        self.assertEqual(f2_h_inode_c.id, f2_inode_c.id)
        
        # Check subdir1
        self.assertNotEqual(d1_inode.id, d1_inode_c.id)
        self.assertNotEqual(d2_inode.id, d2_inode_c.id)     
        
        self.fsck()

    def test_lock_tree(self):

        inode1 = self.server.mkdir(ROOT_INODE, 'source', self.dir_mode(), Ctx())

        # Create file
        (fh, inode1a) = self.server.create(inode1.id, 'file1',
                                            self.file_mode(), Ctx())
        self.server.write(fh, 0, 'file1 contents')
        self.server.release(fh)

        # Create subdirectory
        inode2 = self.server.mkdir(inode1.id, 'dir1', self.dir_mode(), Ctx())
        (fh, inode2a) = self.server.create(inode2.id, 'file2',
                                           self.file_mode(), Ctx())
        self.server.write(fh, 0, 'file2 contents')
        self.server.release(fh)   

        # Another file
        (fh, inode3) = self.server.create(ROOT_INODE, 'file1',
                                          self.file_mode(), Ctx())
        self.server.release(fh)  
        
        # Lock
        self.server.lock_tree(inode1.id)
        
        for i in (inode1.id, inode1a.id, inode2.id, inode2a.id):
            self.assertTrue(self.server.inodes[i].locked)
        
        # Remove
        with self.assertRaises(FUSEError) as cm:
            self.server._remove(inode1.id, 'file1', inode1a.id)
        self.assertEqual(cm.exception.errno, errno.EPERM)
        
        # Rename / Replace
        with self.assertRaises(FUSEError) as cm:
            self.server.rename(ROOT_INODE, 'file1', inode1.id, 'file2')
        self.assertEqual(cm.exception.errno, errno.EPERM)
        with self.assertRaises(FUSEError) as cm:
            self.server.rename(inode1.id, 'file1', ROOT_INODE, 'file2')
        self.assertEqual(cm.exception.errno, errno.EPERM)
                
        # Open
        with self.assertRaises(FUSEError) as cm:
            self.server.open(inode2a.id, os.O_RDWR)
        self.assertEqual(cm.exception.errno, errno.EPERM)
        with self.assertRaises(FUSEError) as cm:
            self.server.open(inode2a.id, os.O_WRONLY)
        self.assertEqual(cm.exception.errno, errno.EPERM)
        self.server.release(self.server.open(inode3.id, os.O_WRONLY))
                
        # Write
        fh = self.server.open(inode2a.id, os.O_RDONLY)
        with self.assertRaises(FUSEError) as cm:
            self.server.write(fh, 0, 'foo')
        self.assertEqual(cm.exception.errno, errno.EPERM)
        self.server.release(fh)
        
        # Create
        with self.assertRaises(FUSEError) as cm:
            self.server._create(inode2.id, 'dir1', self.dir_mode(), Ctx())
        self.assertEqual(cm.exception.errno, errno.EPERM)
                 
        # Setattr
        with self.assertRaises(FUSEError) as cm:
            self.server.setattr(inode2a.id, dict())
        self.assertEqual(cm.exception.errno, errno.EPERM)
        
        # xattr
        with self.assertRaises(FUSEError) as cm:
            self.server.setxattr(inode2.id, 'name', 'value')
        self.assertEqual(cm.exception.errno, errno.EPERM)        
        with self.assertRaises(FUSEError) as cm:
            self.server.removexattr(inode2.id, 'name')
        self.assertEqual(cm.exception.errno, errno.EPERM)        

        self.fsck()
        
    def test_remove_tree(self):

        inode1 = self.server.mkdir(ROOT_INODE, 'source', self.dir_mode(), Ctx())

        # Create file
        (fh, inode1a) = self.server.create(inode1.id, 'file1',
                                            self.file_mode(), Ctx())
        self.server.write(fh, 0, 'file1 contents')
        self.server.release(fh)

        # Create subdirectory
        inode2 = self.server.mkdir(inode1.id, 'dir1', self.dir_mode(), Ctx())
        (fh, inode2a) = self.server.create(inode2.id, 'file2',
                                           self.file_mode(), Ctx())
        self.server.write(fh, 0, 'file2 contents')
        self.server.release(fh)   

        # Remove
        self.server.remove_tree(ROOT_INODE, 'source')

        for (id_p, name) in ((ROOT_INODE, 'source'),
                             (inode1.id, 'file1'),
                             (inode1.id, 'dir1'),
                             (inode2.id, 'file2')):
            self.assertFalse(self.db.has_val('SELECT inode FROM contents JOIN names ON names.id = name_id '
                                             'WHERE name=? AND parent_inode = ?', (name, id_p)))
            
        for id_ in (inode1.id, inode1a.id, inode2.id, inode2a.id):
            self.assertFalse(self.db.has_val('SELECT id FROM inodes WHERE id=?', (id_,)))
        
        self.fsck()
Example no. 40
0
    def test(self):
        skip_without_rsync()
        ref_dir = tempfile.mkdtemp(prefix='s3ql-ref-')
        try:
            populate_dir(ref_dir)

            # Make file system and fake high inode number
            self.mkfs()
            db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
            db.execute('UPDATE sqlite_sequence SET seq=? WHERE name=?',
                       (2 ** 31 + 10, 'inodes'))
            db.close()

            # Copy source data
            self.mount()
            subprocess.check_call(['rsync', '-aHAX', ref_dir + '/',
                                   self.mnt_dir + '/'])
            self.umount()

            # Check that inode watermark is high
            db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
            assert db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?',
                              ('inodes',)) > 2 ** 31 + 10
            assert db.get_val('SELECT MAX(id) FROM inodes') > 2 ** 31 + 10
            db.close()

            # Renumber inodes
            self.fsck()

            # Check if renumbering was done
            db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
            assert db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?',
                               ('inodes',)) < 2 ** 31
            assert db.get_val('SELECT MAX(id) FROM inodes') < 2 ** 31
            db.close()

            # Compare
            self.mount()
            try:
                out = check_output(['rsync', '-anciHAX', '--delete', '--exclude', '/lost+found',
                                   ref_dir + '/', self.mnt_dir + '/'], universal_newlines=True,
                                  stderr=subprocess.STDOUT)
            except CalledProcessError as exc:
                pytest.fail('rsync failed with ' + exc.output)
            if out:
                pytest.fail('Copy not equal to original, rsync says:\n' + out)

            self.umount()
        finally:
            shutil.rmtree(ref_dir)
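
This test leans on SQLite's sqlite_sequence table, which records the highest AUTOINCREMENT id handed out per table; overwriting its seq column is what fakes the high inode watermark. A minimal standalone demonstration with the stdlib sqlite3 module (illustrative only):

import sqlite3

db = sqlite3.connect(':memory:')
db.execute('CREATE TABLE inodes (id INTEGER PRIMARY KEY AUTOINCREMENT)')
db.execute('INSERT INTO inodes DEFAULT VALUES')  # id 1; creates the seq row

# Fake a high watermark, as the test does via UPDATE sqlite_sequence
db.execute('UPDATE sqlite_sequence SET seq=? WHERE name=?',
           (2 ** 31 + 10, 'inodes'))

# Newly inserted rows now get ids above the watermark
db.execute('INSERT INTO inodes DEFAULT VALUES')
assert db.execute('SELECT MAX(id) FROM inodes').fetchone()[0] == 2 ** 31 + 11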
Example #41
class cache_tests(TestCase):

    def setUp(self):
        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)
        self.cache = inode_cache.InodeCache(self.db)

    def tearDown(self):
        self.cache.destroy()

    def test_create(self):
        attrs = {'mode': 784,
                 'refcount': 3,
                 'uid': 7,
                 'gid': 2,
                 'size': 34674,
                 'rdev': 11,
                 'atime': time.time(),
                 'ctime': time.time(),
                 'mtime': time.time() }

        inode = self.cache.create_inode(**attrs)

        for key in attrs.keys():
            self.assertEqual(attrs[key], getattr(inode, key))

        self.assertTrue(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,)))


    def test_del(self):
        attrs = {'mode': 784,
                'refcount': 3,
                'uid': 7,
                'gid': 2,
                'size': 34674,
                'rdev': 11,
                'atime': time.time(),
                'ctime': time.time(),
                'mtime': time.time() }
        inode = self.cache.create_inode(**attrs)
        del self.cache[inode.id]
        self.assertFalse(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,)))
        self.assertRaises(KeyError, self.cache.__delitem__, inode.id)

    def test_get(self):
        attrs = {'mode': 784,
                'refcount': 3,
                'uid': 7,
                'gid': 2,
                'size': 34674,
                'rdev': 11,
                'atime': time.time(),
                'ctime': time.time(),
                'mtime': time.time() }
        
        inode = self.cache.create_inode(**attrs)
        for (key, val) in attrs.items():
            self.assertEqual(getattr(inode, key), val)

        # Create another inode
        self.cache.create_inode(**attrs)
        
        self.db.execute('DELETE FROM inodes WHERE id=?', (inode.id,))
        # Entry should still be in cache
        self.assertEqual(inode, self.cache[inode.id])

        # Now it should be out of the cache
        for _ in range(inode_cache.CACHE_SIZE + 1):
            self.cache.create_inode(**attrs)

        self.assertRaises(KeyError, self.cache.__getitem__, inode.id)
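
test_get above depends on a fixed-size cache: a deleted database row stays readable until CACHE_SIZE + 1 new inodes push the entry out. A rough sketch of that eviction pattern with collections.OrderedDict (illustrative only, not the InodeCache implementation):

from collections import OrderedDict

CACHE_SIZE = 100  # stand-in for inode_cache.CACHE_SIZE

class BoundedCache:
    def __init__(self):
        self._data = OrderedDict()

    def __setitem__(self, key, value):
        self._data[key] = value
        self._data.move_to_end(key)         # mark as most recently used
        if len(self._data) > CACHE_SIZE:
            self._data.popitem(last=False)  # evict the oldest entry

    def __getitem__(self, key):
        value = self._data[key]             # raises KeyError once evicted
        self._data.move_to_end(key)
        return value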
Example #42
class cache_tests(unittest.TestCase):
    def setUp(self):

        self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        self.backend_pool = BackendPool(
            lambda: local.Backend('local://' + self.backend_dir, None, None))

        self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Create an inode we can work with
        self.inode = 42
        self.db.execute(
            "INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?,?)",
            (self.inode,
             stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
             | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
             os.getuid(), os.getgid(), time.time(), time.time(), time.time(),
             1, 32))

        cache = BlockCache(self.backend_pool, self.db,
                           self.cachedir + "/cache", self.max_obj_size * 100)
        self.cache = cache

        # Monkeypatch around the need for removal and upload threads
        cache.to_remove = DummyQueue(cache)

        class DummyDistributor:
            def put(self, arg):
                cache._do_upload(*arg)

        cache.to_upload = DummyDistributor()

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()

    def tearDown(self):
        llfuse.lock.release()
        self.cache.backend_pool = self.backend_pool
        self.cache.destroy()
        shutil.rmtree(self.cachedir)
        shutil.rmtree(self.backend_dir)
        os.unlink(self.dbfile.name)

    @staticmethod
    def random_data(len_):
        with open("/dev/urandom", "rb") as fh:
            return fh.read(len_)

    def test_get(self):
        inode = self.inode
        blockno = 11
        data = self.random_data(int(0.5 * self.max_obj_size))

        # Case 1: Object does not exist yet
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            fh.write(data)

        # Case 2: Object is in cache
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            self.assertEqual(data, fh.read(len(data)))

        # Case 3: Object needs to be downloaded
        self.cache.clear()
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            self.assertEqual(data, fh.read(len(data)))

    def test_expire(self):
        inode = self.inode

        # Define the 4 most recently accessed ones
        most_recent = [7, 11, 10, 8]
        for i in most_recent:
            time.sleep(0.2)
            with self.cache.get(inode, i) as fh:
                fh.write(('%d' % i).encode())

        # And some others
        for i in range(20):
            if i in most_recent:
                continue
            with self.cache.get(inode, i) as fh:
                fh.write(('%d' % i).encode())

        # Flush two of the most recently accessed ones
        commit(self.cache, inode, most_recent[-2])
        commit(self.cache, inode, most_recent[-3])

        # We want to expire 4 entries, 2 of which are already flushed
        self.cache.cache.max_entries = 16
        self.cache.backend_pool = TestBackendPool(self.backend_pool,
                                                  no_write=2)
        self.cache.expire()
        self.cache.backend_pool.verify()
        self.assertEqual(len(self.cache.cache), 16)

        for i in range(20):
            if i in most_recent:
                self.assertTrue((inode, i) not in self.cache.cache)
            else:
                self.assertTrue((inode, i) in self.cache.cache)

    def test_upload(self):
        inode = self.inode
        datalen = int(0.1 * self.cache.cache.max_size)
        blockno1 = 21
        blockno2 = 25
        blockno3 = 7

        data1 = self.random_data(datalen)
        data2 = self.random_data(datalen)
        data3 = self.random_data(datalen)

        # Case 1: create new object
        self.cache.backend_pool = TestBackendPool(self.backend_pool,
                                                  no_write=1)
        with self.cache.get(inode, blockno1) as fh:
            fh.seek(0)
            fh.write(data1)
            el1 = fh
        self.cache.upload(el1)
        self.cache.backend_pool.verify()

        # Case 2: Link new object
        self.cache.backend_pool = TestBackendPool(self.backend_pool)
        with self.cache.get(inode, blockno2) as fh:
            fh.seek(0)
            fh.write(data1)
            el2 = fh
        self.cache.upload(el2)
        self.cache.backend_pool.verify()

        # Case 3: Upload old object, still has references
        self.cache.backend_pool = TestBackendPool(self.backend_pool,
                                                  no_write=1)
        with self.cache.get(inode, blockno1) as fh:
            fh.seek(0)
            fh.write(data2)
        self.cache.upload(el1)
        self.cache.backend_pool.verify()

        # Case 4: Upload old object, no references left
        self.cache.backend_pool = TestBackendPool(self.backend_pool,
                                                  no_del=1,
                                                  no_write=1)
        with self.cache.get(inode, blockno2) as fh:
            fh.seek(0)
            fh.write(data3)
        self.cache.upload(el2)
        self.cache.backend_pool.verify()

        # Case 5: Link old object, no references left
        self.cache.backend_pool = TestBackendPool(self.backend_pool, no_del=1)
        with self.cache.get(inode, blockno2) as fh:
            fh.seek(0)
            fh.write(data2)
        self.cache.upload(el2)
        self.cache.backend_pool.verify()

        # Case 6: Link old object, still has references
        # (Need to create another object first)
        self.cache.backend_pool = TestBackendPool(self.backend_pool,
                                                  no_write=1)
        with self.cache.get(inode, blockno3) as fh:
            fh.seek(0)
            fh.write(data1)
            el3 = fh
        self.cache.upload(el3)
        self.cache.backend_pool.verify()

        self.cache.backend_pool = TestBackendPool(self.backend_pool)
        with self.cache.get(inode, blockno1) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.upload(el1)
        self.cache.clear()
        self.cache.backend_pool.verify()

    def test_remove_referenced(self):
        inode = self.inode
        datalen = int(0.1 * self.cache.cache.max_size)
        blockno1 = 21
        blockno2 = 24
        data = self.random_data(datalen)

        self.cache.backend_pool = TestBackendPool(self.backend_pool,
                                                  no_write=1)
        with self.cache.get(inode, blockno1) as fh:
            fh.seek(0)
            fh.write(data)
        with self.cache.get(inode, blockno2) as fh:
            fh.seek(0)
            fh.write(data)
        self.cache.clear()
        self.cache.backend_pool.verify()

        self.cache.backend_pool = TestBackendPool(self.backend_pool)
        self.cache.remove(inode, blockno1)
        self.cache.backend_pool.verify()

    def test_remove_cache(self):
        inode = self.inode
        data1 = self.random_data(int(0.4 * self.max_obj_size))

        # Case 1: Elements only in cache
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.remove(inode, 1)
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            self.assertEqual(fh.read(42), b'')

    def test_upload_race(self):
        inode = self.inode
        blockno = 1
        data1 = self.random_data(int(0.4 * self.max_obj_size))
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            fh.write(data1)

        # Remove it
        self.cache.remove(inode, blockno)

        # Try to upload it; this may happen if the CommitThread is interrupted
        self.cache.upload(fh)

    def test_expire_race(self):
        # Create element
        inode = self.inode
        blockno = 1
        data1 = self.random_data(int(0.4 * self.max_obj_size))
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.upload(fh)

        # Make sure entry will be expired
        self.cache.cache.max_entries = 0

        def e_w_l():
            with llfuse.lock:
                self.cache.expire()

        # Lock it
        self.cache._lock_entry(inode, blockno, release_global=True)

        try:
            # Start expiration, will block on lock
            t1 = AsyncFn(e_w_l)
            t1.start()

            # Start second expiration, will block
            t2 = AsyncFn(e_w_l)
            t2.start()

            # Release lock
            with llfuse.lock_released:
                time.sleep(0.1)
                self.cache._unlock_entry(inode, blockno)
                t1.join_and_raise()
                t2.join_and_raise()

            assert len(self.cache.cache) == 0
        finally:
            self.cache._unlock_entry(inode,
                                     blockno,
                                     release_global=True,
                                     noerror=True)

    def test_parallel_expire(self):
        # Create elements
        inode = self.inode
        for i in range(5):
            data1 = self.random_data(int(0.4 * self.max_obj_size))
            with self.cache.get(inode, i) as fh:
                fh.write(data1)

        # We want to expire just one element, but have
        # several threads running expire() simultaneously
        self.cache.cache.max_entries = 4

        def e_w_l():
            with llfuse.lock:
                self.cache.expire()

        # Lock first element so that we have time to start threads
        self.cache._lock_entry(inode, 0, release_global=True)

        try:
            # Start expiration, will block on lock
            t1 = AsyncFn(e_w_l)
            t1.start()

            # Start second expiration, will block
            t2 = AsyncFn(e_w_l)
            t2.start()

            # Release lock
            with llfuse.lock_released:
                time.sleep(0.1)
                self.cache._unlock_entry(inode, 0)
                t1.join_and_raise()
                t2.join_and_raise()

            assert len(self.cache.cache) == 4
        finally:
            self.cache._unlock_entry(inode,
                                     0,
                                     release_global=True,
                                     noerror=True)

    def test_remove_cache_db(self):
        inode = self.inode
        data1 = self.random_data(int(0.4 * self.max_obj_size))

        # Case 2: Element in cache and db
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.backend_pool = TestBackendPool(self.backend_pool,
                                                  no_write=1)
        commit(self.cache, inode)
        self.cache.backend_pool.verify()
        self.cache.backend_pool = TestBackendPool(self.backend_pool, no_del=1)
        self.cache.remove(inode, 1)
        self.cache.backend_pool.verify()

        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            self.assertEqual(fh.read(42), b'')

    def test_remove_db(self):
        inode = self.inode
        data1 = self.random_data(int(0.4 * self.max_obj_size))

        # Case 3: Element only in DB
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.backend_pool = TestBackendPool(self.backend_pool,
                                                  no_write=1)
        self.cache.clear()
        self.cache.backend_pool.verify()
        self.cache.backend_pool = TestBackendPool(self.backend_pool, no_del=1)
        self.cache.remove(inode, 1)
        self.cache.backend_pool.verify()
        with self.cache.get(inode, 1) as fh:
            fh.seek(0)
            self.assertEqual(fh.read(42), b'')
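
Throughout these tests, TestBackendPool evidently wraps the real pool and checks that exactly no_write writes and no_del deletes reach the backend before verify() is called. A hypothetical sketch of such a counting wrapper (the real class ships with the test suite and is not shown here):

class CountingBackendPool:
    """Check that exactly the expected number of backend operations occur."""

    def __init__(self, pool, no_write=0, no_del=0):
        self.pool = pool
        self.expected = {'write': no_write, 'del': no_del}
        self.seen = {'write': 0, 'del': 0}

    def record(self, op):
        # An instrumented backend would call this from its write/delete paths
        self.seen[op] += 1
        if self.seen[op] > self.expected[op]:
            raise AssertionError('unexpected %s operation' % op)

    def verify(self):
        if self.seen != self.expected:
            raise AssertionError('expected %s, got %s'
                                 % (self.expected, self.seen))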
Example #43
class DumpTests(unittest.TestCase):
    def setUp(self):
        self.tmpfh1 = tempfile.NamedTemporaryFile()
        self.tmpfh2 = tempfile.NamedTemporaryFile()
        self.src = Connection(self.tmpfh1.name)
        self.dst = Connection(self.tmpfh2.name)
        self.fh = tempfile.TemporaryFile()

        # Disable exclusive locking for all tests
        self.src.execute('PRAGMA locking_mode = NORMAL')
        self.dst.execute('PRAGMA locking_mode = NORMAL')

        self.create_table(self.src)
        self.create_table(self.dst)

    def tearDown(self):
        self.src.close()
        self.dst.close()
        self.tmpfh1.close()
        self.tmpfh2.close()
        self.fh.close()

    def test_transactions(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0), )
        deltadump.dump_table(table='test',
                             order='id',
                             columns=dumpspec,
                             db=self.src,
                             fh=self.fh)
        self.fh.seek(0)
        self.dst.execute('PRAGMA journal_mode = WAL')

        deltadump.load_table(table='test',
                             columns=dumpspec,
                             db=self.dst,
                             fh=self.fh,
                             trx_rows=10)
        self.compare_tables(self.src, self.dst)

    def test_1_vals_1(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0), )
        deltadump.dump_table(table='test',
                             order='id',
                             columns=dumpspec,
                             db=self.src,
                             fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test',
                             columns=dumpspec,
                             db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_1_vals_2(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 1), )
        deltadump.dump_table(table='test',
                             order='id',
                             columns=dumpspec,
                             db=self.src,
                             fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test',
                             columns=dumpspec,
                             db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_1_vals_3(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, -1), )
        deltadump.dump_table(table='test',
                             order='id',
                             columns=dumpspec,
                             db=self.src,
                             fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test',
                             columns=dumpspec,
                             db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_2_buf_auto(self):
        self.fill_vals(self.src)
        self.fill_buf(self.src)
        dumpspec = (('id', deltadump.INTEGER), ('buf', deltadump.BLOB))
        deltadump.dump_table(table='test',
                             order='id',
                             columns=dumpspec,
                             db=self.src,
                             fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test',
                             columns=dumpspec,
                             db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_2_buf_fixed(self):
        BUFLEN = 32
        self.fill_vals(self.src)
        self.fill_buf(self.src, BUFLEN)
        dumpspec = (('id', deltadump.INTEGER), ('buf', deltadump.BLOB, BUFLEN))
        deltadump.dump_table(table='test',
                             order='id',
                             columns=dumpspec,
                             db=self.src,
                             fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test',
                             columns=dumpspec,
                             db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_3_deltas_1(self):
        self.fill_deltas(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0), )
        deltadump.dump_table(table='test',
                             order='id',
                             columns=dumpspec,
                             db=self.src,
                             fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test',
                             columns=dumpspec,
                             db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_3_deltas_2(self):
        self.fill_deltas(self.src)
        dumpspec = (('id', deltadump.INTEGER, 1), )
        deltadump.dump_table(table='test',
                             order='id',
                             columns=dumpspec,
                             db=self.src,
                             fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test',
                             columns=dumpspec,
                             db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_3_deltas_3(self):
        self.fill_deltas(self.src)
        dumpspec = (('id', deltadump.INTEGER, -1), )
        deltadump.dump_table(table='test',
                             order='id',
                             columns=dumpspec,
                             db=self.src,
                             fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test',
                             columns=dumpspec,
                             db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_5_multi(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0), )
        deltadump.dump_table(table='test',
                             order='id',
                             columns=dumpspec,
                             db=self.src,
                             fh=self.fh)
        deltadump.dump_table(table='test',
                             order='id',
                             columns=dumpspec,
                             db=self.src,
                             fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test',
                             columns=dumpspec,
                             db=self.dst,
                             fh=self.fh)
        self.dst.execute('DELETE FROM test')
        deltadump.load_table(table='test',
                             columns=dumpspec,
                             db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def compare_tables(self, db1, db2):
        i1 = db1.query('SELECT id, buf FROM test ORDER BY id')
        i2 = db2.query('SELECT id, buf FROM test ORDER BY id')

        for (id1, buf1) in i1:
            (id2, buf2) = next(i2)

            self.assertEqual(id1, id2)
            if isinstance(buf1, float):
                self.assertAlmostEqual(buf1, buf2, places=9)
            else:
                self.assertEqual(buf1, buf2)

        self.assertRaises(StopIteration, i2.__next__)

    def fill_buf(self, db, len_=None):
        with open('/dev/urandom', 'rb') as rfh:
            first = True
            for (id_, ) in db.query('SELECT id FROM test'):
                if len_ is None and first:
                    val = b''  # We always want to check this case
                    first = False
                elif len_ is None:
                    val = rfh.read(random.randint(0, 140))
                else:
                    val = rfh.read(len_)

                db.execute('UPDATE test SET buf=? WHERE id=?', (val, id_))

    def fill_vals(self, db):
        vals = []
        for exp in [7, 8, 9, 15, 16, 17, 31, 32, 33, 62]:
            vals += list(range(2**exp - 5, 2**exp + 6))
        vals += list(range(2**63 - 5, 2**63))
        vals += [-v for v in vals]
        vals.append(-(2**63))

        for val in vals:
            db.execute('INSERT INTO test (id) VALUES(?)', (val, ))

    def fill_deltas(self, db):
        deltas = []
        for exp in [7, 8, 9, 15, 16, 17, 31, 32, 33]:
            deltas += list(range(2**exp - 5, 2**exp + 6))
        deltas += [-v for v in deltas]

        last = 0
        for delta in deltas:
            val = last + delta
            last = val
            db.execute('INSERT INTO test (id) VALUES(?)', (val, ))

    def create_table(self, db):
        db.execute('''CREATE TABLE test (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            buf BLOB)''')
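
fill_deltas generates values whose successive differences cluster around powers of two, which is exactly the case deltadump's delta encoding targets: instead of full 64-bit ids, only the (usually small) differences get written. The core idea in plain Python (deltadump itself is compiled code and additionally uses variable-length integer encoding):

def delta_encode(values):
    """Replace each value with its difference from the previous one."""
    out, last = [], 0
    for val in values:
        out.append(val - last)
        last = val
    return out

def delta_decode(deltas):
    """Invert delta_encode by running a cumulative sum."""
    out, last = [], 0
    for delta in deltas:
        last += delta
        out.append(last)
    return out

vals = [3, 7, 8, 10, 50]
assert delta_encode(vals) == [3, 4, 1, 2, 40]
assert delta_decode(delta_encode(vals)) == vals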
Example #44
def get_metadata(bucket, cachepath, readonly=False):
    '''Retrieve metadata
    
    Checks:
    - Revision
    - Unclean mounts
    
    Locally cached metadata is used if up-to-date.
    '''
                
    seq_no = get_seq_no(bucket)

    # Check for cached metadata
    db = None
    if os.path.exists(cachepath + '.params'):
        with open(cachepath + '.params', 'rb') as fh:
            param = pickle.load(fh)
        if param['seq_no'] < seq_no:
            log.info('Ignoring locally cached metadata (outdated).')
            param = bucket.lookup('s3ql_metadata')
        else:
            log.info('Using cached metadata.')
            db = Connection(cachepath + '.db')
    else:
        param = bucket.lookup('s3ql_metadata')
 
    # Check for unclean shutdown
    if param['seq_no'] < seq_no:
        if bucket.is_get_consistent():
            # raise QuietError(textwrap.fill(textwrap.dedent('''\
            #     It appears that the file system is still mounted somewhere else. If this is not
            #     the case, the file system may not have been unmounted cleanly and you should try
            #     to run fsck on the computer where the file system has been mounted most recently.
            #     ''')))
            log.warn('Local seq no is smaller than bucket seq no, which implies '
                     'another mount point; but the bucket is consistent, so the '
                     'error is ignored.')
        else:
            # raise QuietError(textwrap.fill(textwrap.dedent('''\
            #     It appears that the file system is still mounted somewhere else. If this is not the
            #     case, the file system may not have been unmounted cleanly or the data from the
            #     most-recent mount may not yet have propagated through the backend. In the latter case,
            #     waiting for a while should fix the problem; in the former case you should try to run
            #     fsck on the computer where the file system has been mounted most recently.
            #     ''')))
            log.warn('Local seq no is smaller than bucket seq no, which implies '
                     'another mount point, and the bucket is inconsistent; the '
                     'error cannot be ignored.')
       
    # Check revision
    if param['revision'] < CURRENT_FS_REV:
        raise QuietError('File system revision too old, please run `s3qladm upgrade` first.')
    elif param['revision'] > CURRENT_FS_REV:
        raise QuietError('File system revision too new, please update your '
                         'S3QL installation.')
        
    # Check that the fs itself is clean
    if not readonly and param['needs_fsck']:
        raise QuietError("File system damaged or not unmounted cleanly, run fsck!")        
    if (time.time() - time.timezone) - param['last_fsck'] > 60 * 60 * 24 * 31:
        log.warn('Last file system check was more than 1 month ago, '
                 'running fsck.s3ql is recommended.')
    
    # Download metadata
    if not db:
        log.info("Downloading & uncompressing metadata...")
        os.close(os.open(cachepath + '.db.tmp', os.O_RDWR | os.O_CREAT | os.O_TRUNC,
                         stat.S_IRUSR | stat.S_IWUSR)) 
        db = Connection(cachepath + '.db.tmp', fast_mode=True)
        with bucket.open_read("s3ql_metadata") as fh:
            restore_metadata(fh, db)
        db.close()
        os.rename(cachepath + '.db.tmp', cachepath + '.db')
        db = Connection(cachepath + '.db')
 
    # Increase metadata sequence no 
    param['seq_no'] += 1
    param['needs_fsck'] = True
    bucket['s3ql_seq_no_%d' % param['seq_no']] = 'Empty'
    with open(cachepath + '.params', 'wb') as fh:
        pickle.dump(param, fh, 2)
    param['needs_fsck'] = False
    
    return (param, db)
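
get_seq_no (not shown here) presumably scans the bucket for s3ql_seq_no_* objects, of the kind this function writes just above, and returns the highest numeric suffix. A minimal sketch under that assumption (bucket.list(prefix) is assumed to yield matching key names):

PREFIX = 's3ql_seq_no_'

def get_seq_no(bucket):
    """Sketch: return the highest sequence number recorded in the bucket."""
    seq_nos = [int(name[len(PREFIX):]) for name in bucket.list(PREFIX)]
    if not seq_nos:
        raise QuietError('Not an initialized S3QL bucket')
    return max(seq_nos)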
Example #45
class fs_api_tests(unittest.TestCase):
    def setUp(self):
        self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        plain_backend = local.Backend('local://' + self.backend_dir, None,
                                      None)
        self.backend_pool = BackendPool(
            lambda: ComprencBackend(b'schwubl', ('zlib', 6), plain_backend))
        self.backend = self.backend_pool.pop_conn()
        self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)

        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()

        cache = BlockCache(self.backend_pool, self.db,
                           self.cachedir + "/cache", self.max_obj_size * 5)
        self.block_cache = cache
        self.server = fs.Operations(cache, self.db, self.max_obj_size,
                                    InodeCache(self.db, 0))
        self.server.init()

        # Monkeypatch around the need for removal and upload threads
        cache.to_remove = DummyQueue(cache)

        class DummyDistributor:
            def put(self, arg, timeout=None):
                cache._do_upload(*arg)
                return True

        cache.to_upload = DummyDistributor()

        # Keep track of unused filenames
        self.name_cnt = 0

    def tearDown(self):
        self.server.inodes.destroy()
        llfuse.lock.release()
        self.block_cache.destroy()
        shutil.rmtree(self.cachedir)
        shutil.rmtree(self.backend_dir)
        os.unlink(self.dbfile.name)
        self.dbfile.close()

    @staticmethod
    def random_data(len_):
        with open("/dev/urandom", "rb") as fd:
            return fd.read(len_)

    def fsck(self):
        self.block_cache.clear()
        self.server.inodes.flush()
        fsck = Fsck(self.cachedir + '/cache', self.backend,
                    {'max_obj_size': self.max_obj_size}, self.db)
        fsck.check()
        self.assertFalse(fsck.found_errors)

    def newname(self):
        self.name_cnt += 1
        return ("s3ql_%d" % self.name_cnt).encode()

    def test_getattr_root(self):
        self.assertTrue(stat.S_ISDIR(self.server.getattr(ROOT_INODE).mode))
        self.fsck()

    def test_create(self):
        ctx = Ctx()
        mode = self.dir_mode()
        name = self.newname()

        inode_p_old = self.server.getattr(ROOT_INODE).copy()
        safe_sleep(CLOCK_GRANULARITY)
        self.server._create(ROOT_INODE, name, mode, ctx)

        id_ = self.db.get_val(
            'SELECT inode FROM contents JOIN names ON name_id = names.id '
            'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE))

        inode = self.server.getattr(id_)

        self.assertEqual(inode.mode, mode)
        self.assertEqual(inode.uid, ctx.uid)
        self.assertEqual(inode.gid, ctx.gid)
        self.assertEqual(inode.refcount, 1)
        self.assertEqual(inode.size, 0)

        inode_p_new = self.server.getattr(ROOT_INODE)

        self.assertGreater(inode_p_new.mtime, inode_p_old.mtime)
        self.assertGreater(inode_p_new.ctime, inode_p_old.ctime)

        self.server.forget([(id_, 1)])
        self.fsck()

    def test_extstat(self):
        # Test with zero contents
        self.server.extstat()

        # Test with empty file
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)
        self.server.extstat()

        # Test with data in file
        fh = self.server.open(inode.id, os.O_RDWR)
        self.server.write(fh, 0, b'foobar')
        self.server.release(fh)

        self.server.extstat()
        self.server.forget([(inode.id, 1)])
        self.fsck()

    @staticmethod
    def dir_mode():
        return (randint(0, 0o7777) & ~stat.S_IFDIR) | stat.S_IFDIR

    @staticmethod
    def file_mode():
        return (randint(0, 0o7777) & ~stat.S_IFREG) | stat.S_IFREG

    def test_getxattr(self):
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)

        self.assertRaises(FUSEError, self.server.getxattr, inode.id,
                          b'nonexistant-attr')

        self.server.setxattr(inode.id, b'my-attr', b'strabumm!')
        self.assertEqual(self.server.getxattr(inode.id, b'my-attr'),
                         b'strabumm!')

        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_link(self):
        name = self.newname()

        inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(),
                                        self.dir_mode(), Ctx())
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()

        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)
        safe_sleep(CLOCK_GRANULARITY)

        inode_before = self.server.getattr(inode.id).copy()
        self.server.link(inode.id, inode_p_new.id, name)

        inode_after = self.server.lookup(inode_p_new.id, name)
        inode_p_new_after = self.server.getattr(inode_p_new.id)

        id_ = self.db.get_val(
            'SELECT inode FROM contents JOIN names ON names.id = name_id '
            'WHERE name=? AND parent_inode = ?', (name, inode_p_new.id))

        self.assertEqual(inode_before.id, id_)
        self.assertEqual(inode_after.refcount, 2)
        self.assertGreater(inode_after.ctime, inode_before.ctime)
        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
        self.server.forget([(inode.id, 1), (inode_p_new.id, 1),
                            (inode_after.id, 1)])
        self.fsck()

    def test_listxattr(self):
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)

        self.assertListEqual([], self.server.listxattr(inode.id))

        self.server.setxattr(inode.id, b'key1', b'blub')
        self.assertListEqual([b'key1'], self.server.listxattr(inode.id))

        self.server.setxattr(inode.id, b'key2', b'blub')
        self.assertListEqual(sorted([b'key1', b'key2']),
                             sorted(self.server.listxattr(inode.id)))
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_read(self):

        len_ = self.max_obj_size
        data = self.random_data(len_)
        off = self.max_obj_size // 2
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())

        self.server.write(fh, off, data)
        inode_before = self.server.getattr(inode.id).copy()
        safe_sleep(CLOCK_GRANULARITY)
        self.assertTrue(self.server.read(fh, off, len_) == data)
        inode_after = self.server.getattr(inode.id)
        self.assertGreater(inode_after.atime, inode_before.atime)
        self.assertTrue(
            self.server.read(fh, 0, len_) == b"\0" * off + data[:off])
        self.assertTrue(
            self.server.read(fh, self.max_obj_size, len_) == data[off:])
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_readdir(self):

        # Create a few entries
        names = [('entry_%2d' % i).encode() for i in range(20)]
        for name in names:
            (fh, inode) = self.server.create(ROOT_INODE,
                                             name, self.file_mode(), os.O_RDWR,
                                             Ctx())
            self.server.release(fh)
            self.server.forget([(inode.id, 1)])

        # Delete some to make sure that we don't have continuous rowids
        remove_no = [0, 2, 3, 5, 9]
        for i in remove_no:
            self.server.unlink(ROOT_INODE, names[i])
            del names[i]

        # Read all
        fh = self.server.opendir(ROOT_INODE)
        self.assertListEqual(sorted(names + [b'lost+found']),
                             sorted(x[0] for x in self.server.readdir(fh, 0)))
        self.server.releasedir(fh)

        # Read in parts
        fh = self.server.opendir(ROOT_INODE)
        entries = list()
        try:
            next_ = 0
            while True:
                gen = self.server.readdir(fh, next_)
                for _ in range(3):
                    (name, _, next_) = next(gen)
                    entries.append(name)

        except StopIteration:
            pass

        self.assertListEqual(sorted(names + [b'lost+found']), sorted(entries))
        self.server.releasedir(fh)

        self.fsck()

    def test_forget(self):
        name = self.newname()

        # Test that entries are deleted when they're no longer referenced
        (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(),
                                         os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'foobar')
        self.server.unlink(ROOT_INODE, name)
        self.assertFalse(
            self.db.has_val(
                'SELECT 1 FROM contents JOIN names ON names.id = name_id '
                'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        self.assertTrue(self.server.getattr(inode.id).id)
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])

        self.assertFalse(
            self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id, )))

        self.fsck()

    def test_removexattr(self):
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)

        self.assertRaises(FUSEError, self.server.removexattr, inode.id,
                          b'some name')
        self.server.setxattr(inode.id, b'key1', b'blub')
        self.server.removexattr(inode.id, b'key1')
        self.assertListEqual([], self.server.listxattr(inode.id))
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_rename(self):
        oldname = self.newname()
        newname = self.newname()

        inode = self.server.mkdir(ROOT_INODE, oldname, self.dir_mode(), Ctx())

        inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(),
                                        self.dir_mode(), Ctx())
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
        inode_p_old_before = self.server.getattr(ROOT_INODE).copy()
        safe_sleep(CLOCK_GRANULARITY)

        self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)

        inode_p_old_after = self.server.getattr(ROOT_INODE)
        inode_p_new_after = self.server.getattr(inode_p_new.id)

        self.assertFalse(
            self.db.has_val(
                'SELECT inode FROM contents JOIN names ON names.id = name_id '
                'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE)))
        id_ = self.db.get_val(
            'SELECT inode FROM contents JOIN names ON names.id = name_id '
            'WHERE name=? AND parent_inode = ?', (newname, inode_p_new.id))
        self.assertEqual(inode.id, id_)

        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
        self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
        self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)

        self.server.forget([(inode.id, 1), (inode_p_new.id, 1)])
        self.fsck()

    def test_replace_file(self):
        oldname = self.newname()
        newname = self.newname()

        (fh, inode) = self.server.create(ROOT_INODE, oldname, self.file_mode(),
                                         os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'some data to deal with')
        self.server.release(fh)
        self.server.setxattr(inode.id, b'test_xattr', b'42*8')

        inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(),
                                        self.dir_mode(), Ctx())
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
        inode_p_old_before = self.server.getattr(ROOT_INODE).copy()

        (fh, inode2) = self.server.create(inode_p_new.id, newname,
                                          self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'even more data to deal with')
        self.server.release(fh)
        self.server.setxattr(inode2.id, b'test_xattr', b'42*8')
        self.server.forget([(inode2.id, 1)])

        safe_sleep(CLOCK_GRANULARITY)
        self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)

        inode_p_old_after = self.server.getattr(ROOT_INODE)
        inode_p_new_after = self.server.getattr(inode_p_new.id)

        self.assertFalse(
            self.db.has_val(
                'SELECT inode FROM contents JOIN names ON names.id = name_id '
                'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE)))
        id_ = self.db.get_val(
            'SELECT inode FROM contents JOIN names ON names.id = name_id '
            'WHERE name=? AND parent_inode = ?', (newname, inode_p_new.id))
        self.assertEqual(inode.id, id_)

        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
        self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
        self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)

        self.assertFalse(
            self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode2.id, )))
        self.server.forget([(inode.id, 1), (inode_p_new.id, 1)])
        self.fsck()

    def test_replace_dir(self):
        oldname = self.newname()
        newname = self.newname()

        inode = self.server.mkdir(ROOT_INODE, oldname, self.dir_mode(), Ctx())

        inode_p_new = self.server.mkdir(ROOT_INODE, self.newname(),
                                        self.dir_mode(), Ctx())
        inode_p_new_before = self.server.getattr(inode_p_new.id).copy()
        inode_p_old_before = self.server.getattr(ROOT_INODE).copy()

        inode2 = self.server.mkdir(inode_p_new.id, newname, self.dir_mode(),
                                   Ctx())
        self.server.forget([(inode2.id, 1)])

        safe_sleep(CLOCK_GRANULARITY)
        self.server.rename(ROOT_INODE, oldname, inode_p_new.id, newname)

        inode_p_old_after = self.server.getattr(ROOT_INODE)
        inode_p_new_after = self.server.getattr(inode_p_new.id)

        self.assertFalse(
            self.db.has_val(
                'SELECT inode FROM contents JOIN names ON names.id = name_id '
                'WHERE name=? AND parent_inode = ?', (oldname, ROOT_INODE)))
        id_ = self.db.get_val(
            'SELECT inode FROM contents JOIN names ON names.id = name_id '
            'WHERE name=? AND parent_inode = ?', (newname, inode_p_new.id))
        self.assertEqual(inode.id, id_)

        self.assertLess(inode_p_new_before.mtime, inode_p_new_after.mtime)
        self.assertLess(inode_p_new_before.ctime, inode_p_new_after.ctime)
        self.assertLess(inode_p_old_before.mtime, inode_p_old_after.mtime)
        self.assertLess(inode_p_old_before.ctime, inode_p_old_after.ctime)

        self.server.forget([(inode.id, 1), (inode_p_new.id, 1)])
        self.assertFalse(
            self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode2.id, )))
        self.fsck()

    def test_setattr(self):
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(), 0o641,
                                         os.O_RDWR, Ctx())
        self.server.release(fh)
        inode_old = self.server.getattr(inode.id).copy()

        attr = llfuse.EntryAttributes()
        attr.st_mode = self.file_mode()
        attr.st_uid = randint(0, 2**32)
        attr.st_gid = randint(0, 2**32)
        attr.st_atime = randint(0, 2**32) / 10**6
        attr.st_mtime = randint(0, 2**32) / 10**6

        safe_sleep(CLOCK_GRANULARITY)
        self.server.setattr(inode.id, attr)
        inode_new = self.server.getattr(inode.id)
        self.assertGreater(inode_new.ctime, inode_old.ctime)

        for key in attr.__slots__:
            if getattr(attr, key) is not None:
                self.assertEqual(getattr(attr, key), getattr(inode_new, key))

        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_truncate(self):
        len_ = int(2.7 * self.max_obj_size)
        data = self.random_data(len_)
        attr = llfuse.EntryAttributes()

        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, data)

        attr.st_size = len_ // 2
        self.server.setattr(inode.id, attr)
        self.assertTrue(self.server.read(fh, 0, len_) == data[:len_ // 2])
        attr.st_size = len_
        self.server.setattr(inode.id, attr)
        self.assertTrue(
            self.server.read(fh, 0, len_) == data[:len_ // 2] + b'\0' *
            (len_ // 2))
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_truncate_0(self):
        len1 = 158
        len2 = 133
        attr = llfuse.EntryAttributes()

        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, self.random_data(len1))
        self.server.release(fh)
        self.server.inodes.flush()

        fh = self.server.open(inode.id, os.O_RDWR)
        attr.st_size = 0
        self.server.setattr(inode.id, attr)
        self.server.write(fh, 0, self.random_data(len2))
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_setxattr(self):
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)

        self.server.setxattr(inode.id, b'my-attr', b'strabumm!')
        self.assertEqual(self.server.getxattr(inode.id, b'my-attr'),
                         b'strabumm!')
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_names(self):
        name1 = self.newname()
        name2 = self.newname()

        (fh, inode) = self.server.create(ROOT_INODE, name1, self.file_mode(),
                                         os.O_RDWR, Ctx())
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])

        (fh, inode) = self.server.create(ROOT_INODE, name2, self.file_mode(),
                                         os.O_RDWR, Ctx())
        self.server.release(fh)

        self.server.setxattr(inode.id, name1, b'strabumm!')
        self.fsck()

        self.server.removexattr(inode.id, name1)
        self.fsck()

        self.server.setxattr(inode.id, name1, b'strabumm karacho!!')
        self.server.unlink(ROOT_INODE, name1)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_statfs(self):
        # Test with zero contents
        self.server.statfs()

        # Test with empty file
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)
        self.server.statfs()

        # Test with data in file
        fh = self.server.open(inode.id, os.O_RDWR)
        self.server.write(fh, 0, b'foobar')
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.server.statfs()

    def test_symlink(self):
        target = self.newname()
        name = self.newname()

        inode_p_before = self.server.getattr(ROOT_INODE).copy()
        safe_sleep(CLOCK_GRANULARITY)
        inode = self.server.symlink(ROOT_INODE, name, target, Ctx())
        inode_p_after = self.server.getattr(ROOT_INODE)

        self.assertEqual(target, self.server.readlink(inode.id))

        id_ = self.db.get_val(
            'SELECT inode FROM contents JOIN names ON names.id = name_id '
            'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE))

        self.assertEqual(inode.id, id_)
        self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
        self.assertLess(inode_p_before.ctime, inode_p_after.ctime)

        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_unlink(self):
        name = self.newname()

        (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(),
                                         os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'some data to deal with')
        self.server.release(fh)

        # Add extended attributes
        self.server.setxattr(inode.id, b'test_xattr', b'42*8')
        self.server.forget([(inode.id, 1)])

        inode_p_before = self.server.getattr(ROOT_INODE).copy()
        safe_sleep(CLOCK_GRANULARITY)
        self.server.unlink(ROOT_INODE, name)
        inode_p_after = self.server.getattr(ROOT_INODE)

        self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
        self.assertLess(inode_p_before.ctime, inode_p_after.ctime)

        self.assertFalse(
            self.db.has_val(
                'SELECT inode FROM contents JOIN names ON names.id = name_id '
                'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        self.assertFalse(
            self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id, )))

        self.fsck()

    def test_rmdir(self):
        name = self.newname()
        inode = self.server.mkdir(ROOT_INODE, name, self.dir_mode(), Ctx())
        self.server.forget([(inode.id, 1)])
        inode_p_before = self.server.getattr(ROOT_INODE).copy()
        safe_sleep(CLOCK_GRANULARITY)
        self.server.rmdir(ROOT_INODE, name)
        inode_p_after = self.server.getattr(ROOT_INODE)

        self.assertLess(inode_p_before.mtime, inode_p_after.mtime)
        self.assertLess(inode_p_before.ctime, inode_p_after.ctime)
        self.assertFalse(
            self.db.has_val(
                'SELECT inode FROM contents JOIN names ON names.id = name_id '
                'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        self.assertFalse(
            self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id, )))

        self.fsck()

    def test_relink(self):
        name = self.newname()
        name2 = self.newname()
        data = b'some data to deal with'

        (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(),
                                         os.O_RDWR, Ctx())
        self.server.write(fh, 0, data)
        self.server.release(fh)
        self.server.unlink(ROOT_INODE, name)
        self.server.inodes.flush()
        self.assertFalse(
            self.db.has_val(
                'SELECT inode FROM contents JOIN names ON names.id = name_id '
                'WHERE name=? AND parent_inode = ?', (name, ROOT_INODE)))
        self.assertTrue(
            self.db.has_val('SELECT id FROM inodes WHERE id=?', (inode.id, )))
        self.server.link(inode.id, ROOT_INODE, name2)
        self.server.forget([(inode.id, 2)])

        inode = self.server.lookup(ROOT_INODE, name2)
        fh = self.server.open(inode.id, os.O_RDONLY)
        self.assertTrue(self.server.read(fh, 0, len(data)) == data)
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_write(self):
        len_ = self.max_obj_size
        data = self.random_data(len_)
        off = self.max_obj_size // 2
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        inode_before = self.server.getattr(inode.id).copy()
        safe_sleep(CLOCK_GRANULARITY)
        self.server.write(fh, off, data)
        inode_after = self.server.getattr(inode.id)

        self.assertGreater(inode_after.mtime, inode_before.mtime)
        self.assertGreater(inode_after.ctime, inode_before.ctime)
        self.assertEqual(inode_after.size, off + len_)

        self.server.write(fh, 0, data)
        inode_after = self.server.getattr(inode.id)
        self.assertEqual(inode_after.size, off + len_)

        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_failsafe(self):
        len_ = self.max_obj_size
        data = self.random_data(len_)
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, data)
        self.server.cache.clear()
        self.assertTrue(self.server.failsafe is False)

        datafile = os.path.join(self.backend_dir, 's3ql_data_', 's3ql_data_1')
        shutil.copy(datafile, datafile + '.bak')

        # Modify contents
        with open(datafile, 'rb+') as rfh:
            rfh.seek(560)
            rfh.write(b'blrub!')
        with self.assertRaises(FUSEError) as cm:
            with catch_logmsg('^Backend returned malformed data for',
                              count=1,
                              level=logging.ERROR):
                self.server.read(fh, 0, len_)
        self.assertEqual(cm.exception.errno, errno.EIO)
        self.assertTrue(self.server.failsafe)

        # Restore contents, but should be marked as damaged now
        os.rename(datafile + '.bak', datafile)
        with self.assertRaises(FUSEError) as cm:
            self.server.read(fh, 0, len_)
        self.assertEqual(cm.exception.errno, errno.EIO)

        # Release and re-open, now we should be able to access again
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])

        # ..but not write access since we are in failsafe mode
        with self.assertRaises(FUSEError) as cm:
            self.server.open(inode.id, os.O_RDWR)
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # ..read-only access is fine.
        fh = self.server.open(inode.id, os.O_RDONLY)
        self.server.read(fh, 0, len_)

        # Remove completely, should give error after cache flush
        os.unlink(datafile)
        self.server.read(fh, 3, len_ // 2)
        self.server.cache.clear()
        with self.assertRaises(FUSEError) as cm:
            with catch_logmsg('^Backend lost block',
                              count=1,
                              level=logging.ERROR):
                self.server.read(fh, 5, len_ // 2)
        self.assertEqual(cm.exception.errno, errno.EIO)

        # Don't call fsck, we're missing a block

    def test_create_open(self):
        name = self.newname()
        # Create a new file
        (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(),
                                         os.O_RDWR, Ctx())
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])

        # Open it atomically
        (fh, inode) = self.server.create(ROOT_INODE, name, self.file_mode(),
                                         os.O_RDWR, Ctx())
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])

        self.fsck()

    def test_edit(self):
        len_ = self.max_obj_size
        data = self.random_data(len_)
        (fh, inode) = self.server.create(ROOT_INODE, self.newname(),
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, data)
        self.server.release(fh)

        self.block_cache.clear()
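        # The cache has been flushed, so the edit below starts from the data stored in the backend.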

        fh = self.server.open(inode.id, os.O_RDWR)
        attr = llfuse.EntryAttributes()
        attr.st_size = 0
        self.server.setattr(inode.id, attr)
        self.server.write(fh, 0, data[50:])
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])
        self.fsck()

    def test_copy_tree(self):
        ext_attr_name = b'system.foo.brazl'
        ext_attr_val = b'schulla dku woumm bramp'

        src_inode = self.server.mkdir(ROOT_INODE, b'source', self.dir_mode(),
                                      Ctx())
        dst_inode = self.server.mkdir(ROOT_INODE, b'dest', self.dir_mode(),
                                      Ctx())

        # Create file
        (fh, f1_inode) = self.server.create(src_inode.id, b'file1',
                                            self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'file1 contents')
        self.server.release(fh)
        self.server.setxattr(f1_inode.id, ext_attr_name, ext_attr_val)

        # Create hardlink
        (fh, f2_inode) = self.server.create(src_inode.id, b'file2',
                                            self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'file2 contents')
        self.server.release(fh)
        f2_inode = self.server.link(f2_inode.id, src_inode.id,
                                    b'file2_hardlink')

        # Create subdirectory
        d1_inode = self.server.mkdir(src_inode.id, b'dir1', self.dir_mode(),
                                     Ctx())
        d2_inode = self.server.mkdir(d1_inode.id, b'dir2', self.dir_mode(),
                                     Ctx())

        # ..with a 3rd hardlink
        f2_inode = self.server.link(f2_inode.id, d1_inode.id,
                                    b'file2_hardlink')
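        # file2 is now reachable through three links, so its refcount should be 3.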

        # Replicate
        self.server.copy_tree(src_inode.id, dst_inode.id)

        # Modify the originals; the copies must keep the old contents
        fh = self.server.open(f1_inode.id, os.O_RDWR)
        self.server.write(fh, 0, b'new file1 contents')
        self.server.release(fh)

        fh = self.server.open(f2_inode.id, os.O_RDWR)
        self.server.write(fh, 0, b'new file2 contents')
        self.server.release(fh)

        # Get copy properties
        f1_inode_c = self.server.lookup(dst_inode.id, b'file1')
        f2_inode_c = self.server.lookup(dst_inode.id, b'file2')
        f2h_inode_c = self.server.lookup(dst_inode.id, b'file2_hardlink')
        d1_inode_c = self.server.lookup(dst_inode.id, b'dir1')
        d2_inode_c = self.server.lookup(d1_inode_c.id, b'dir2')
        f2_h_inode_c = self.server.lookup(d1_inode_c.id, b'file2_hardlink')

        # Check file1
        fh = self.server.open(f1_inode_c.id, os.O_RDWR)
        self.assertEqual(self.server.read(fh, 0, 42), b'file1 contents')
        self.server.release(fh)
        self.assertNotEqual(f1_inode.id, f1_inode_c.id)
        self.assertEqual(self.server.getxattr(f1_inode_c.id, ext_attr_name),
                         ext_attr_val)

        # Check file2
        fh = self.server.open(f2_inode_c.id, os.O_RDWR)
        self.assertEqual(self.server.read(fh, 0, 42), b'file2 contents')
        self.server.release(fh)
        self.assertEqual(f2_inode_c.id, f2h_inode_c.id)
        self.assertEqual(f2_inode_c.refcount, 3)
        self.assertNotEqual(f2_inode.id, f2_inode_c.id)
        self.assertEqual(f2_h_inode_c.id, f2_inode_c.id)

        # Check subdir1
        self.assertNotEqual(d1_inode.id, d1_inode_c.id)
        self.assertNotEqual(d2_inode.id, d2_inode_c.id)
        self.server.forget(list(self.server.open_inodes.items()))
        self.fsck()

    def test_copy_tree_2(self):
        src_inode = self.server.mkdir(ROOT_INODE, b'source', self.dir_mode(),
                                      Ctx())
        dst_inode = self.server.mkdir(ROOT_INODE, b'dest', self.dir_mode(),
                                      Ctx())

        # Create file
        (fh, inode) = self.server.create(src_inode.id, b'file1',
                                         self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'block 1 contents')
        self.server.write(fh, self.max_obj_size, b'block 1 contents')
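        # Both blocks hold identical data; this presumably makes copy_tree() handle de-duplicated blocks.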
        self.server.release(fh)
        self.server.forget([(inode.id, 1)])

        self.server.copy_tree(src_inode.id, dst_inode.id)

        self.server.forget([(src_inode.id, 1), (dst_inode.id, 1)])
        self.fsck()

    def test_lock_tree(self):

        inode1 = self.server.mkdir(ROOT_INODE, b'source', self.dir_mode(),
                                   Ctx())

        # Create file
        (fh, inode1a) = self.server.create(inode1.id, b'file1',
                                           self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'file1 contents')
        self.server.release(fh)

        # Create subdirectory
        inode2 = self.server.mkdir(inode1.id, b'dir1', self.dir_mode(), Ctx())
        (fh, inode2a) = self.server.create(inode2.id, b'file2',
                                           self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'file2 contents')
        self.server.release(fh)

        # Another file
        (fh, inode3) = self.server.create(ROOT_INODE, b'file1',
                                          self.file_mode(), os.O_RDWR, Ctx())
        self.server.release(fh)

        # Lock
        self.server.lock_tree(inode1.id)
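        # Every inode in the locked subtree must now be flagged as locked.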

        for i in (inode1.id, inode1a.id, inode2.id, inode2a.id):
            self.assertTrue(self.server.inodes[i].locked)

        # Remove
        with self.assertRaises(FUSEError) as cm:
            self.server._remove(inode1.id, b'file1', inode1a.id)
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # Rename / Replace
        with self.assertRaises(FUSEError) as cm:
            self.server.rename(ROOT_INODE, b'file1', inode1.id, b'file2')
        self.assertEqual(cm.exception.errno, errno.EPERM)
        with self.assertRaises(FUSEError) as cm:
            self.server.rename(inode1.id, b'file1', ROOT_INODE, b'file2')
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # Open
        with self.assertRaises(FUSEError) as cm:
            self.server.open(inode2a.id, os.O_RDWR)
        self.assertEqual(cm.exception.errno, errno.EPERM)
        with self.assertRaises(FUSEError) as cm:
            self.server.open(inode2a.id, os.O_WRONLY)
        self.assertEqual(cm.exception.errno, errno.EPERM)
        self.server.release(self.server.open(inode3.id, os.O_WRONLY))

        # Write
        fh = self.server.open(inode2a.id, os.O_RDONLY)
        with self.assertRaises(FUSEError) as cm:
            self.server.write(fh, 0, b'foo')
        self.assertEqual(cm.exception.errno, errno.EPERM)
        self.server.release(fh)

        # Create
        with self.assertRaises(FUSEError) as cm:
            self.server._create(inode2.id, b'dir1', self.dir_mode(), os.O_RDWR,
                                Ctx())
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # Setattr
        with self.assertRaises(FUSEError) as cm:
            self.server.setattr(inode2a.id, llfuse.EntryAttributes())
        self.assertEqual(cm.exception.errno, errno.EPERM)

        # xattr
        with self.assertRaises(FUSEError) as cm:
            self.server.setxattr(inode2.id, b'name', b'value')
        self.assertEqual(cm.exception.errno, errno.EPERM)
        with self.assertRaises(FUSEError) as cm:
            self.server.removexattr(inode2.id, b'name')
        self.assertEqual(cm.exception.errno, errno.EPERM)
        self.server.forget(list(self.server.open_inodes.items()))
        self.fsck()

    def test_remove_tree(self):

        inode1 = self.server.mkdir(ROOT_INODE, b'source', self.dir_mode(),
                                   Ctx())

        # Create file
        (fh, inode1a) = self.server.create(inode1.id, b'file1',
                                           self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'file1 contents')
        self.server.release(fh)

        # Create subdirectory
        inode2 = self.server.mkdir(inode1.id, b'dir1', self.dir_mode(), Ctx())
        (fh, inode2a) = self.server.create(inode2.id, b'file2',
                                           self.file_mode(), os.O_RDWR, Ctx())
        self.server.write(fh, 0, b'file2 contents')
        self.server.release(fh)

        # Remove
        self.server.forget(list(self.server.open_inodes.items()))
        self.server.remove_tree(ROOT_INODE, b'source')
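        # Both the directory entries and the inodes themselves must be gone.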

        for (id_p, name) in ((ROOT_INODE, b'source'), (inode1.id, b'file1'),
                             (inode1.id, b'dir1'), (inode2.id, b'file2')):
            self.assertFalse(
                self.db.has_val(
                    'SELECT inode FROM contents JOIN names ON names.id = name_id '
                    'WHERE name=? AND parent_inode = ?', (name, id_p)))

        for id_ in (inode1.id, inode1a.id, inode2.id, inode2a.id):
            self.assertFalse(
                self.db.has_val('SELECT id FROM inodes WHERE id=?', (id_, )))

        self.fsck()
Example No. 46
0
File: fsck.py Project: drewlu/ossql
def main(args=None):

    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)
        
    # Check if fs is mounted on this computer
    # This is not foolproof but should prevent common mistakes
    match = options.storage_url + ' /'
    with open('/proc/mounts', 'r') as fh:
        for line in fh:
            if line.startswith(match):
                raise QuietError('Cannot check mounted file system.')
    

    bucket = get_bucket(options)
    
    cachepath = get_bucket_cachedir(options.storage_url, options.cachedir)
    seq_no = get_seq_no(bucket)
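    # The sequence number tracks metadata generations and is compared below to detect a stale local copy.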
    param_remote = bucket.lookup('s3ql_metadata')
    db = None
    
    if os.path.exists(cachepath + '.params'):
        assert os.path.exists(cachepath + '.db')
        with open(cachepath + '.params', 'rb') as fh:
            param = pickle.load(fh)
        if param['seq_no'] < seq_no:
            log.info('Ignoring locally cached metadata (outdated).')
            param = bucket.lookup('s3ql_metadata')
        else:
            log.info('Using cached metadata.')
            db = Connection(cachepath + '.db')
            assert not os.path.exists(cachepath + '-cache') or param['needs_fsck']
    
        if param_remote['seq_no'] != param['seq_no']:
            log.warning('Remote metadata is outdated.')
            param['needs_fsck'] = True
            
    else:
        param = param_remote
        assert not os.path.exists(cachepath + '-cache')
        # .db might exist if mount.s3ql is killed at exactly the right instant
        # and should just be ignored.
       
    # Check revision
    if param['revision'] < CURRENT_FS_REV:
        raise QuietError('File system revision too old, please run `s3qladm upgrade` first.')
    elif param['revision'] > CURRENT_FS_REV:
        raise QuietError('File system revision too new, please update your '
                         'S3QL installation.')
    
    if param['seq_no'] < seq_no:
        if bucket.is_get_consistent():
            print(textwrap.fill(textwrap.dedent('''\
                  Up-to-date metadata is not available. Probably the file system has not
                  been unmounted properly, and you should run fsck on the computer
                  where the file system was mounted most recently.
                  ''')))
        else:
            print(textwrap.fill(textwrap.dedent('''\
                  Up-to-date metadata is not available. Either the file system has not
                  been unmounted cleanly, or the data has not yet propagated through
                  the backend. In the latter case, waiting a while should fix the
                  problem; in the former case, you should run fsck on the computer
                  where the file system was mounted most recently.
                  ''')))
    
        print('Enter "continue" to use the outdated data anyway:',
              '> ', sep='\n', end='')
        if options.batch:
            raise QuietError('(in batch mode, exiting)')
        if sys.stdin.readline().strip() != 'continue':
            raise QuietError()
        
        param['seq_no'] = seq_no
        param['needs_fsck'] = True
    
    
    if (not param['needs_fsck'] 
        and ((time.time() - time.timezone) - param['last_fsck'])
             < 60 * 60 * 24 * 31): # i.e., last check was less than one month ago
        if options.force:
            log.info('File system seems clean, checking anyway.')
        else:
            log.info('File system is marked as clean. Use --force to force checking.')
            return
    
    # If using local metadata, check consistency
    if db:
        log.info('Checking DB integrity...')
        try:
            # get_list may raise CorruptError itself
            res = db.get_list('PRAGMA integrity_check(20)')
            if res[0][0] != u'ok':
                log.error('\n'.join(x[0] for x in res ))
                raise apsw.CorruptError()
        except apsw.CorruptError:
            raise QuietError('Local metadata is corrupted. Remove or repair the following '
                             'files manually and re-run fsck:\n'
                             + cachepath + '.db (corrupted)\n'
                             + cachepath + '.params (intact)')
    else:
        log.info("Downloading & uncompressing metadata...")
        os.close(os.open(cachepath + '.db.tmp', os.O_RDWR | os.O_CREAT | os.O_TRUNC,
                         stat.S_IRUSR | stat.S_IWUSR)) 
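        # (The os.open/os.close pair above creates an empty file with owner-only permissions before SQLite opens it.)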
        db = Connection(cachepath + '.db.tmp', fast_mode=True)
        with bucket.open_read("s3ql_metadata") as fh:
            restore_metadata(fh, db)
        db.close()
        os.rename(cachepath + '.db.tmp', cachepath + '.db')
        db = Connection(cachepath + '.db')
    
    # Increase metadata sequence no 
    param['seq_no'] += 1
    param['needs_fsck'] = True
    bucket['s3ql_seq_no_%d' % param['seq_no']] = 'Empty'
    with open(cachepath + '.params', 'wb') as fh:
        pickle.dump(param, fh, 2)
    
    fsck = Fsck(cachepath + '-cache', bucket, param, db)
    fsck.check()
    
    if fsck.uncorrectable_errors:
        raise QuietError("Uncorrectable errors found, aborting.")
        
    if os.path.exists(cachepath + '-cache'):
        os.rmdir(cachepath + '-cache')
        
    log.info('Saving metadata...')
    fh = tempfile.TemporaryFile()
    dump_metadata(fh, db)  
            
    log.info("Compressing & uploading metadata..")
    cycle_metadata(bucket)
    fh.seek(0)
    param['needs_fsck'] = False
    param['last_fsck'] = time.time() - time.timezone
    param['last-modified'] = time.time() - time.timezone
    with bucket.open_write("s3ql_metadata", param) as dst:
        fh.seek(0)
        shutil.copyfileobj(fh, dst)
    fh.close()
    with open(cachepath + '.params', 'wb') as fh:
        pickle.dump(param, fh, 2)
        
    db.execute('ANALYZE')
    db.execute('VACUUM')
    db.close() 
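A usage sketch for the entry point above (hedged: the exit code and the assumption that QuietError carries a user-facing message are inferred from the code, not confirmed by it):

if __name__ == '__main__':
    try:
        # main() falls back to sys.argv[1:] itself when called without arguments
        main()
    except QuietError as exc:
        # QuietError signals an expected failure; print the message without a traceback
        print(str(exc), file=sys.stderr)
        sys.exit(1)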
Example No. 47
0
    def setUp(self):
        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)
        self.cache = inode_cache.InodeCache(self.db)