def ctx():
    """Generator fixture: build a complete S3QL test environment.

    Sets up a local (compressed + encrypted) backend, an SQLite database,
    a BlockCache and an fs.Operations server, yields them bundled in a
    Namespace, and tears everything down after the test body returns.
    """
    ctx = Namespace()
    ctx.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
    plain_backend = local.Backend(Namespace(
        storage_url='local://' + ctx.backend_dir))
    # Every pooled connection wraps the same plain backend with zlib
    # compression (level 6) and the b'schwubl' passphrase.
    ctx.backend_pool = BackendPool(lambda: ComprencBackend(
        b'schwubl', ('zlib', 6), plain_backend))
    ctx.backend = ctx.backend_pool.pop_conn()
    ctx.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
    ctx.max_obj_size = 1024

    # Destructors are not guaranteed to run, and we can't unlink
    # the file immediately because apsw refers to it by name.
    # Therefore, we unlink the file manually in tearDown()
    ctx.dbfile = tempfile.NamedTemporaryFile(delete=False)
    ctx.db = Connection(ctx.dbfile.name)
    create_tables(ctx.db)
    init_tables(ctx.db)

    # Tested methods assume that they are called from
    # file system request handler
    llfuse.lock.acquire()

    # Cache sized to hold five maximum-size objects.
    cache = BlockCache(ctx.backend_pool, ctx.db,
                       ctx.cachedir + "/cache",
                       ctx.max_obj_size * 5)
    ctx.block_cache = cache
    ctx.server = fs.Operations(cache, ctx.db, ctx.max_obj_size,
                               InodeCache(ctx.db, 0))
    ctx.server.init()

    # Monkeypatch around the need for removal and upload threads
    cache.to_remove = DummyQueue(cache)

    class DummyDistributor:
        # Synchronous stand-in for the upload thread pool: performs the
        # upload inline instead of queueing it.  (The first parameter is
        # named `ctx` rather than `self` in the original; kept as-is.)
        def put(ctx, arg, timeout=None):
            cache._do_upload(*arg)
            return True

    cache.to_upload = DummyDistributor()

    # Keep track of unused filenames
    ctx.name_cnt = 0

    yield ctx

    # Teardown: destroy the inode cache while the fs lock is still held,
    # then release the lock before flushing/destroying the block cache.
    ctx.server.inodes.destroy()
    llfuse.lock.release()
    ctx.block_cache.destroy()
    shutil.rmtree(ctx.cachedir)
    shutil.rmtree(ctx.backend_dir)
    # Unlink by name first (apsw holds the file by name), then close.
    os.unlink(ctx.dbfile.name)
    ctx.dbfile.close()
async def ctx():
    """Async generator fixture: trio-based S3QL test environment.

    Trio counterpart of the synchronous fixture: same backend, database,
    BlockCache and fs.Operations setup, but uploads are dispatched via a
    worker thread and no llfuse lock is taken.
    """
    ctx = Namespace()
    ctx.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
    plain_backend = local.Backend(
        Namespace(storage_url='local://' + ctx.backend_dir))
    # Every pooled connection wraps the same plain backend with zlib
    # compression (level 6) and the b'schwubl' passphrase.
    ctx.backend_pool = BackendPool(
        lambda: ComprencBackend(b'schwubl', ('zlib', 6), plain_backend))
    ctx.backend = ctx.backend_pool.pop_conn()
    ctx.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
    ctx.max_obj_size = 1024

    # Destructors are not guaranteed to run, and we can't unlink
    # the file immediately because apsw refers to it by name.
    # Therefore, we unlink the file manually in tearDown()
    ctx.dbfile = tempfile.NamedTemporaryFile(delete=False)
    ctx.db = Connection(ctx.dbfile.name)
    create_tables(ctx.db)
    init_tables(ctx.db)

    # Cache sized to hold five maximum-size objects.
    cache = BlockCache(ctx.backend_pool, ctx.db,
                       ctx.cachedir + "/cache",
                       ctx.max_obj_size * 5)
    # NOTE(review): BlockingTrioPortal and run_sync_in_worker_thread are
    # pre-0.12 trio APIs (later trio.from_thread / trio.to_thread) —
    # confirm against the project's pinned trio version.
    cache.portal = trio.BlockingTrioPortal()
    ctx.cache = cache
    ctx.server = fs.Operations(cache, ctx.db, ctx.max_obj_size,
                               InodeCache(ctx.db, 0))
    ctx.server.init()

    # Monkeypatch around the need for removal and upload threads
    cache.to_remove = DummyQueue(cache)

    class DummyChannel:
        # Stand-in for the upload send-channel: runs the (blocking)
        # upload in a worker thread so the trio loop is not blocked.
        async def send(self, arg):
            await trio.run_sync_in_worker_thread(cache._do_upload, *arg)

    cache.to_upload = (DummyChannel(), None)

    # Keep track of unused filenames
    ctx.name_cnt = 0

    yield ctx

    # Teardown: destroy inode cache, then flush/destroy the block cache
    # (async), then remove all on-disk state.
    ctx.server.inodes.destroy()
    await ctx.cache.destroy()
    shutil.rmtree(ctx.cachedir)
    shutil.rmtree(ctx.backend_dir)
    # Unlink by name first (apsw holds the file by name), then close.
    os.unlink(ctx.dbfile.name)
    ctx.dbfile.close()
def setUp(self):
    """Create a fresh backend, cache dir, database and fs server per test."""
    # Temporary directory backing the plain (unencrypted) local backend.
    self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
    raw_backend = local.Backend('local://' + self.backend_dir, None, None)
    # Each pooled connection wraps the same raw backend with zlib
    # compression (level 6) and the b'schwubl' passphrase.
    self.backend_pool = BackendPool(
        lambda: ComprencBackend(b'schwubl', ('zlib', 6), raw_backend))
    self.backend = self.backend_pool.pop_conn()

    self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
    self.max_obj_size = 1024

    # Destructors are not guaranteed to run, and the database file cannot
    # be unlinked right away because apsw refers to it by name; it is
    # removed explicitly in tearDown().
    self.dbfile = tempfile.NamedTemporaryFile(delete=False)
    self.db = Connection(self.dbfile.name)
    create_tables(self.db)
    init_tables(self.db)

    # The methods under test expect to run from a file system request
    # handler, i.e. with the global llfuse lock held.
    llfuse.lock.acquire()

    # Cache sized to hold five maximum-size objects.
    blockcache = BlockCache(self.backend_pool, self.db,
                            self.cachedir + "/cache",
                            self.max_obj_size * 5)
    self.block_cache = blockcache
    self.server = fs.Operations(blockcache, self.db, self.max_obj_size,
                                InodeCache(self.db, 0))
    self.server.init()

    # Replace the removal / upload worker threads with synchronous stubs.
    blockcache.to_remove = DummyQueue(blockcache)

    class InlineUploader:
        """Drop-in for the upload distributor: uploads immediately, inline."""

        def put(self, arg, timeout=None):
            blockcache._do_upload(*arg)
            return True

    blockcache.to_upload = InlineUploader()

    # Counter used to generate unique file names in the tests.
    self.name_cnt = 0