async def ctx():
    """Async fixture: a BlockCache wired to a throwaway local backend.

    Yields a Namespace carrying the backend pool, cache dir, sqlite
    connection, a pre-created test inode (id 42) and the BlockCache
    itself; tears everything down afterwards even if the test raises.
    """
    ctx = Namespace()

    # Throwaway on-disk backend behind a one-connection pool.
    ctx.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
    ctx.backend_pool = BackendPool(lambda: local.Backend(
        Namespace(storage_url='local://' + ctx.backend_dir)))

    ctx.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
    ctx.max_obj_size = 1024

    # Destructors are not guaranteed to run, and we can't unlink
    # the file immediately because apsw refers to it by name.
    # Therefore, we unlink the file manually in tearDown()
    ctx.dbfile = tempfile.NamedTemporaryFile(delete=False)
    ctx.db = Connection(ctx.dbfile.name)
    create_tables(ctx.db)
    init_tables(ctx.db)

    # Create an inode we can work with
    ctx.inode = 42
    ts_ns = time_ns()
    file_mode = (stat.S_IFREG
                 | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                 | stat.S_IRGRP | stat.S_IXGRP
                 | stat.S_IROTH | stat.S_IXOTH)
    ctx.db.execute(
        "INSERT INTO inodes (id,mode,uid,gid,mtime_ns,atime_ns,ctime_ns,refcount,size) "
        "VALUES (?,?,?,?,?,?,?,?,?)",
        (ctx.inode, file_mode, os.getuid(), os.getgid(),
         ts_ns, ts_ns, ts_ns, 1, 32))

    blkcache = BlockCache(ctx.backend_pool, ctx.db,
                          ctx.cachedir + "/cache",
                          ctx.max_obj_size * 100)
    blkcache.portal = trio.BlockingTrioPortal()
    ctx.cache = blkcache

    # Monkeypatch around the need for removal and upload threads:
    # removals go to a dummy queue, uploads run synchronously in a
    # worker thread instead of via a real channel.
    blkcache.to_remove = DummyQueue(blkcache)

    class _SyncUploadChannel:
        async def send(self, arg):
            await trio.run_sync_in_worker_thread(blkcache._do_upload, *arg)

    blkcache.to_upload = (_SyncUploadChannel(), None)

    try:
        yield ctx
    finally:
        # Restore the real pool before destroying, in case a test
        # swapped it out.
        ctx.cache.backend_pool = ctx.backend_pool
        if ctx.cache.destroy is not None:
            await ctx.cache.destroy()
        shutil.rmtree(ctx.cachedir)
        shutil.rmtree(ctx.backend_dir)
        ctx.dbfile.close()
        os.unlink(ctx.dbfile.name)
async def ctx():
    """Async fixture: a full fs.Operations stack over a compressed,
    encrypted local backend.

    Yields a Namespace with the backend pool, a popped backend
    connection, sqlite db, BlockCache, the initialized fs.Operations
    server, and a counter for generating unique filenames.

    Fix vs. previous version: teardown ran unconditionally *after* the
    yield, so any exception in the consuming test skipped cleanup and
    leaked the temp dirs and db file.  It is now wrapped in
    try/finally, matching the companion block-cache fixture.  The db
    file is also closed *before* unlinking (unlink-while-open fails on
    Windows; the old order only worked on POSIX).
    """
    ctx = Namespace()

    # Local backend wrapped in compression+encryption, behind a pool.
    ctx.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
    plain_backend = local.Backend(
        Namespace(storage_url='local://' + ctx.backend_dir))
    ctx.backend_pool = BackendPool(
        lambda: ComprencBackend(b'schwubl', ('zlib', 6), plain_backend))
    ctx.backend = ctx.backend_pool.pop_conn()

    ctx.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
    ctx.max_obj_size = 1024

    # Destructors are not guaranteed to run, and we can't unlink
    # the file immediately because apsw refers to it by name.
    # Therefore, we unlink the file manually in tearDown()
    ctx.dbfile = tempfile.NamedTemporaryFile(delete=False)
    ctx.db = Connection(ctx.dbfile.name)
    create_tables(ctx.db)
    init_tables(ctx.db)

    cache = BlockCache(ctx.backend_pool, ctx.db,
                       ctx.cachedir + "/cache",
                       ctx.max_obj_size * 5)
    cache.portal = trio.BlockingTrioPortal()
    ctx.cache = cache
    ctx.server = fs.Operations(cache, ctx.db, ctx.max_obj_size,
                               InodeCache(ctx.db, 0))
    ctx.server.init()

    # Monkeypatch around the need for removal and upload threads
    cache.to_remove = DummyQueue(cache)

    class DummyChannel:
        async def send(self, arg):
            await trio.run_sync_in_worker_thread(cache._do_upload, *arg)

    cache.to_upload = (DummyChannel(), None)

    # Keep track of unused filenames
    ctx.name_cnt = 0

    try:
        yield ctx
    finally:
        ctx.server.inodes.destroy()
        await ctx.cache.destroy()
        shutil.rmtree(ctx.cachedir)
        shutil.rmtree(ctx.backend_dir)
        # Close before unlink: required on Windows, harmless elsewhere.
        ctx.dbfile.close()
        os.unlink(ctx.dbfile.name)