Example #1
    def setUp(self):

        self.backend_dir = tempfile.mkdtemp()
        self.backend_pool = BackendPool(lambda: local.Backend('local://' + self.backend_dir,
                                                              None, None))

        self.cachedir = tempfile.mkdtemp()
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name. 
        # Therefore, we unlink the file manually in tearDown() 
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Create an inode we can work with
        self.inode = 42
        self.db.execute("INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount,size) "
                        "VALUES (?,?,?,?,?,?,?,?,?)",
                        (self.inode, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                         | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
                         os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1, 32))

        self.cache = BlockCache(self.backend_pool, self.db, self.cachedir + "/cache",
                                self.max_obj_size * 100)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()
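
The comments above note that the database file is unlinked manually in tearDown(), but that method is not part of this example. A minimal sketch of what it presumably contains, modeled on the cleanup done in the finally block of Example #5 below (the exact call order is an assumption):

    def tearDown(self):
        # Undo the acquire() at the end of setUp()
        llfuse.lock.release()
        self.cache.destroy()
        shutil.rmtree(self.cachedir)
        shutil.rmtree(self.backend_dir)
        # apsw holds the database file open by name, so unlink it explicitly
        self.dbfile.close()
        os.unlink(self.dbfile.name)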
Example #2
    def setUp(self):
        self.backend_dir = tempfile.mkdtemp()
        plain_backend = local.Backend('local://' + self.backend_dir, None, None)
        self.backend_pool = BackendPool(lambda: BetterBackend(b'schwubl', 'lzma', plain_backend))
        self.backend = self.backend_pool.pop_conn()
        self.cachedir = tempfile.mkdtemp()
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name. 
        # Therefore, we unlink the file manually in tearDown() 
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()

        self.block_cache = BlockCache(self.backend_pool, self.db, self.cachedir + "/cache",
                                      self.max_obj_size * 5)
        self.server = fs.Operations(self.block_cache, self.db, self.max_obj_size,
                                    InodeCache(self.db, 0))

        self.server.init()

        # Keep track of unused filenames
        self.name_cnt = 0
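
Again, the matching tearDown() is not shown. Judging from the cleanup sequence at the end of the generator-based variant in Example #6, it presumably does roughly the following (a sketch, not the project's actual code):

    def tearDown(self):
        self.server.inodes.destroy()
        llfuse.lock.release()
        self.block_cache.destroy()
        shutil.rmtree(self.cachedir)
        shutil.rmtree(self.backend_dir)
        os.unlink(self.dbfile.name)
        self.dbfile.close()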
Example #3
    def setUp(self):
        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)

        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)
        self.cache = inode_cache.InodeCache(self.db, 0)
Example #4
def db():
    dbfile = tempfile.NamedTemporaryFile()
    db = Connection(dbfile.name)
    create_tables(db)
    init_tables(db)
    try:
        yield db
    finally:
        db.close()
        dbfile.close()
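
The try/yield/finally structure marks this as a pytest fixture: the test body runs at the yield, and the finally block guarantees that the connection and the temporary file are closed even if the test fails. A usage sketch, assuming the standard @pytest.fixture registration (the decorator and the test function are not part of the source):

import pytest

@pytest.fixture
def db():
    ...  # generator body as shown above

def test_smoke(db):
    # pytest injects the yielded Connection object
    assert db is not None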
Example #5
def ctx():
    ctx = Namespace()
    ctx.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
    ctx.backend_pool = BackendPool(lambda: local.Backend(
        Namespace(storage_url='local://' + ctx.backend_dir)))

    ctx.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
    ctx.max_obj_size = 1024

    # Destructors are not guaranteed to run, and we can't unlink
    # the file immediately because apsw refers to it by name.
    # Therefore, we unlink the file manually in tearDown()
    ctx.dbfile = tempfile.NamedTemporaryFile(delete=False)
    ctx.db = Connection(ctx.dbfile.name)
    create_tables(ctx.db)
    init_tables(ctx.db)

    # Create an inode we can work with
    ctx.inode = 42
    now_ns = time_ns()
    ctx.db.execute(
        "INSERT INTO inodes (id,mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
        "VALUES (?,?,?,?,?,?,?,?,?)",
        (ctx.inode, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
         | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
         os.getuid(), os.getgid(), now_ns, now_ns, now_ns, 1, 32))

    cache = BlockCache(ctx.backend_pool, ctx.db, ctx.cachedir + "/cache",
                       ctx.max_obj_size * 100)
    ctx.cache = cache

    # Monkeypatch around the need for removal and upload threads
    cache.to_remove = DummyQueue(cache)

    class DummyDistributor:
        def put(self, arg, timeout=None):
            cache._do_upload(*arg)
            return True

    cache.to_upload = DummyDistributor()

    # Tested methods assume that they are called from
    # file system request handler
    s3ql.block_cache.lock = MockLock()
    s3ql.block_cache.lock_released = MockLock()

    try:
        yield ctx
    finally:
        ctx.cache.backend_pool = ctx.backend_pool
        ctx.cache.destroy()
        shutil.rmtree(ctx.cachedir)
        shutil.rmtree(ctx.backend_dir)
        ctx.dbfile.close()
        os.unlink(ctx.dbfile.name)
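
DummyQueue, which stubs out the removal threads, is defined elsewhere in the test module. By analogy with DummyDistributor above, a plausible stand-in simply collects the objects scheduled for removal instead of handing them to a worker thread (the class body below is an assumption, not the project's definition):

from queue import Empty

class DummyQueue:
    # Removal-queue stand-in: with no removal threads running, pending
    # removals are merely recorded so tests can inspect them.
    def __init__(self, cache):
        self.cache = cache
        self.items = []

    def put(self, obj, timeout=None):
        self.items.append(obj)
        return True

    def get_nowait(self):
        if not self.items:
            raise Empty()
        return self.items.pop(0)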
Example #6
def ctx():
    ctx = Namespace()
    ctx.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
    plain_backend = local.Backend(Namespace(
        storage_url='local://' + ctx.backend_dir))
    ctx.backend_pool = BackendPool(lambda: ComprencBackend(b'schwubl', ('zlib', 6),
                                                           plain_backend))
    ctx.backend = ctx.backend_pool.pop_conn()
    ctx.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
    ctx.max_obj_size = 1024

    # Destructors are not guaranteed to run, and we can't unlink
    # the file immediately because apsw refers to it by name.
    # Therefore, we unlink the file manually in tearDown()
    ctx.dbfile = tempfile.NamedTemporaryFile(delete=False)

    ctx.db = Connection(ctx.dbfile.name)
    create_tables(ctx.db)
    init_tables(ctx.db)

    # Tested methods assume that they are called from
    # file system request handler
    llfuse.lock.acquire()

    cache = BlockCache(ctx.backend_pool, ctx.db, ctx.cachedir + "/cache",
                       ctx.max_obj_size * 5)
    ctx.block_cache = cache
    ctx.server = fs.Operations(cache, ctx.db, ctx.max_obj_size,
                                InodeCache(ctx.db, 0))
    ctx.server.init()

    # Monkeypatch around the need for removal and upload threads
    cache.to_remove = DummyQueue(cache)

    class DummyDistributor:
        def put(self, arg, timeout=None):
            cache._do_upload(*arg)
            return True
    cache.to_upload = DummyDistributor()

    # Keep track of unused filenames
    ctx.name_cnt = 0

    yield ctx

    ctx.server.inodes.destroy()
    llfuse.lock.release()
    ctx.block_cache.destroy()
    shutil.rmtree(ctx.cachedir)
    shutil.rmtree(ctx.backend_dir)
    os.unlink(ctx.dbfile.name)
    ctx.dbfile.close()
Example #7
async def ctx():
    ctx = Namespace()
    ctx.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
    ctx.backend_pool = BackendPool(lambda: local.Backend(
        Namespace(storage_url='local://' + ctx.backend_dir)))

    ctx.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
    ctx.max_obj_size = 1024

    # Destructors are not guaranteed to run, and we can't unlink
    # the file immediately because apsw refers to it by name.
    # Therefore, we unlink the file manually in tearDown()
    ctx.dbfile = tempfile.NamedTemporaryFile(delete=False)
    ctx.db = Connection(ctx.dbfile.name)
    create_tables(ctx.db)
    init_tables(ctx.db)

    # Create an inode we can work with
    ctx.inode = 42
    now_ns = time_ns()
    ctx.db.execute(
        "INSERT INTO inodes (id,mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
        "VALUES (?,?,?,?,?,?,?,?,?)",
        (ctx.inode, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
         | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
         os.getuid(), os.getgid(), now_ns, now_ns, now_ns, 1, 32))

    cache = BlockCache(ctx.backend_pool, ctx.db, ctx.cachedir + "/cache",
                       ctx.max_obj_size * 100)
    cache.portal = trio.BlockingTrioPortal()
    ctx.cache = cache

    # Monkeypatch around the need for removal and upload threads
    cache.to_remove = DummyQueue(cache)

    class DummyChannel:
        async def send(self, arg):
            await trio.run_sync_in_worker_thread(cache._do_upload, *arg)

    cache.to_upload = (DummyChannel(), None)

    try:
        yield ctx
    finally:
        ctx.cache.backend_pool = ctx.backend_pool
        if ctx.cache.destroy is not None:
            await ctx.cache.destroy()
        shutil.rmtree(ctx.cachedir)
        shutil.rmtree(ctx.backend_dir)
        ctx.dbfile.close()
        os.unlink(ctx.dbfile.name)
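
This variant targets an older trio release: trio.BlockingTrioPortal and trio.run_sync_in_worker_thread have both since been removed in favor of trio.from_thread and trio.to_thread.run_sync. On a current trio, the upload stub would read roughly:

    class DummyChannel:
        async def send(self, arg):
            # trio.to_thread.run_sync superseded trio.run_sync_in_worker_thread
            await trio.to_thread.run_sync(cache._do_upload, *arg)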
Example #8
    def setUp(self):
        self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        self.backend = local.Backend('local://' + self.backend_dir, None, None)
        self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.max_obj_size = 1024

        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        self.fsck = Fsck(self.cachedir, self.backend,
                         {'max_obj_size': self.max_obj_size}, self.db)
        self.fsck.expect_errors = True
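
A sketch of how a test might drive this fixture, assuming Fsck exposes the check() method and found_errors attribute it has in s3ql; the test body itself is hypothetical:

    def test_clean_check(self):
        # hypothetical: a freshly initialized file system should come back clean
        self.fsck.check()
        assert not self.fsck.found_errors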
Example #9
async def ctx():
    ctx = Namespace()
    ctx.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
    plain_backend = local.Backend(
        Namespace(storage_url='local://' + ctx.backend_dir))
    ctx.backend_pool = BackendPool(
        lambda: ComprencBackend(b'schwubl', ('zlib', 6), plain_backend))
    ctx.backend = ctx.backend_pool.pop_conn()
    ctx.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
    ctx.max_obj_size = 1024

    # Destructors are not guaranteed to run, and we can't unlink
    # the file immediately because apsw refers to it by name.
    # Therefore, we unlink the file manually in tearDown()
    ctx.dbfile = tempfile.NamedTemporaryFile(delete=False)

    ctx.db = Connection(ctx.dbfile.name)
    create_tables(ctx.db)
    init_tables(ctx.db)

    cache = BlockCache(ctx.backend_pool, ctx.db, ctx.cachedir + "/cache",
                       ctx.max_obj_size * 5)
    cache.portal = trio.BlockingTrioPortal()
    ctx.cache = cache
    ctx.server = fs.Operations(cache, ctx.db, ctx.max_obj_size,
                               InodeCache(ctx.db, 0))
    ctx.server.init()

    # Monkeypatch around the need for removal and upload threads
    cache.to_remove = DummyQueue(cache)

    class DummyChannel:
        async def send(self, arg):
            await trio.run_sync_in_worker_thread(cache._do_upload, *arg)

    cache.to_upload = (DummyChannel(), None)

    # Keep track of unused filenames
    ctx.name_cnt = 0

    yield ctx

    ctx.server.inodes.destroy()
    await ctx.cache.destroy()
    shutil.rmtree(ctx.cachedir)
    shutil.rmtree(ctx.backend_dir)
    os.unlink(ctx.dbfile.name)
    ctx.dbfile.close()
Example #10
    def setUp(self):

        self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        self.backend_pool = BackendPool(
            lambda: local.Backend('local://' + self.backend_dir, None, None))

        self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Create an inode we can work with
        self.inode = 42
        self.db.execute(
            "INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?,?)",
            (self.inode,
             stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
             | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
             os.getuid(), os.getgid(), time.time(), time.time(), time.time(),
             1, 32))

        cache = BlockCache(self.backend_pool, self.db,
                           self.cachedir + "/cache", self.max_obj_size * 100)
        self.cache = cache

        # Monkeypatch around the need for removal and upload threads
        cache.to_remove = DummyQueue(cache)

        class DummyDistributor:
            def put(self, arg, timeout=None):
                cache._do_upload(*arg)
                return True

        cache.to_upload = DummyDistributor()

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()
Example #11
    def setUp(self):
        self.backend_dir = tempfile.mkdtemp()
        self.backend = local.Backend('local://' + self.backend_dir, None, None)
        self.cachedir = tempfile.mkdtemp()
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name. 
        # Therefore, we unlink the file manually in tearDown() 
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        self.fsck = Fsck(self.cachedir, self.backend,
                         {'max_obj_size': self.max_obj_size}, self.db)
        self.fsck.expect_errors = True
Example #12
    def setUp(self):

        self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        self.backend_pool = BackendPool(lambda: local.Backend('local://' + self.backend_dir,
                                                              None, None))

        self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Create an inode we can work with
        self.inode = 42
        now_ns = time_ns()
        self.db.execute("INSERT INTO inodes (id,mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
                        "VALUES (?,?,?,?,?,?,?,?,?)",
                        (self.inode, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                         | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
                         os.getuid(), os.getgid(), now_ns, now_ns, now_ns, 1, 32))

        cache = BlockCache(self.backend_pool, self.db, self.cachedir + "/cache",
                           self.max_obj_size * 100)
        self.cache = cache

        # Monkeypatch around the need for removal and upload threads
        cache.to_remove = DummyQueue(cache)

        class DummyDistributor:
            def put(self, arg, timeout=None):
                cache._do_upload(*arg)
                return True
        cache.to_upload = DummyDistributor()

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()
Example #13
    def setUp(self):
        self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        plain_backend = local.Backend('local://' + self.backend_dir, None,
                                      None)
        self.backend_pool = BackendPool(
            lambda: ComprencBackend(b'schwubl', ('zlib', 6), plain_backend))
        self.backend = self.backend_pool.pop_conn()
        self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)

        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()

        cache = BlockCache(self.backend_pool, self.db,
                           self.cachedir + "/cache", self.max_obj_size * 5)
        self.block_cache = cache
        self.server = fs.Operations(cache, self.db, self.max_obj_size,
                                    InodeCache(self.db, 0))
        self.server.init()

        # Monkeypatch around the need for removal and upload threads
        cache.to_remove = DummyQueue(cache)

        class DummyDistributor:
            def put(self, arg, timeout=None):
                cache._do_upload(*arg)
                return True

        cache.to_upload = DummyDistributor()

        # Keep track of unused filenames
        self.name_cnt = 0
Example #14
    def setUp(self):
        self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        plain_backend = local.Backend('local://' + self.backend_dir, None, None)
        self.backend_pool = BackendPool(lambda: ComprencBackend(b'schwubl', ('zlib', 6),
                                                                plain_backend))
        self.backend = self.backend_pool.pop_conn()
        self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)

        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()

        cache = BlockCache(self.backend_pool, self.db, self.cachedir + "/cache",
                           self.max_obj_size * 5)
        self.block_cache = cache
        self.server = fs.Operations(cache, self.db, self.max_obj_size,
                                    InodeCache(self.db, 0))
        self.server.init()

        # Monkeypatch around the need for removal and upload threads
        cache.to_remove = DummyQueue(cache)

        class DummyDistributor:
            def put(self, arg, timeout=None):
                cache._do_upload(*arg)
                return True
        cache.to_upload = DummyDistributor()

        # Keep track of unused filenames
        self.name_cnt = 0
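
The name_cnt counter implies a small helper that hands each test a fresh, unused file name. A plausible sketch (the method name and the name format are assumptions):

    def newname(self):
        self.name_cnt += 1
        return ('testfile_%d' % self.name_cnt).encode()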