def test_api_mount_with_prefetch(
    nydus_anchor, nydus_image: RafsImage, rafs_conf: RafsConf
):
    nydus_image.set_backend(Backend.OSS).create_image()
    hint_files = ["/"]
    rafs = RafsMount(nydus_anchor, None, None, with_defaults=False)

    # Prefetch must enable blobcache
    rafs_conf.enable_rafs_blobcache()
    rafs_conf.set_rafs_backend(Backend.OSS)
    rafs_conf.enable_fs_prefetch(threads_count=4)
    rafs_conf.dump_rafs_conf()

    rafs.set_mountpoint(nydus_anchor.mount_point).apisock("api_sock").mount(
        dump_config=False,
    )

    nc = NydusAPIClient(rafs.get_apisock())
    nc.pseudo_fs_mount(
        nydus_image.bootstrap_path,
        "/pseudo_fs_1",
        rafs_conf.path(),
        hint_files,
        "rafs",
    )

    # Only one rafs mountpoint exists, so it does not matter whether a rafs id is set.
    m = nc.get_blobcache_metrics()
    # TODO: this won't pass
    # assert m["prefetch_data_amount"] != 0

    wg = WorkloadGen(
        os.path.join(nydus_anchor.mount_point, "pseudo_fs_1"), nydus_image.rootfs()
    )
    wg.setup_workload_generator()
    wg.torture_read(4, 8)
    wg.finish_torture_read()
    m = nc.get_blobcache_metrics("/pseudo_fs_1")
def test_specified_prefetch(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    nydus_scratch_image: RafsImage,
    backend,
):
    """
    description:
        Nydusd can be started with a list of files and directories.
        It then prefetches those files from the backend according to the list.
    """
    rafs_conf.set_rafs_backend(backend)
    rafs_conf.enable_fs_prefetch(prefetch_all=True)
    rafs_conf.enable_rafs_blobcache()
    rafs_conf.dump_rafs_conf()

    dist = Distributor(nydus_scratch_image.rootfs(), 8, 2)
    dist.generate_tree()
    dirs = dist.put_directories(20)
    dist.put_multiple_files(100, Size(64, Unit.KB))
    dist.put_symlinks(30)
    dist.put_hardlinks(20)
    dist.put_multiple_files(40, Size(64, Unit.KB))
    dist.put_single_file(Size(3, Unit.MB), name="test")

    nydus_scratch_image.set_backend(backend).create_image()

    prefetching_files = dirs
    prefetching_files += dist.files[:-10]
    prefetching_files += dist.dirs[:-5]
    prefetching_files += dist.symlinks[:-10]
    # Fuzz: include paths that do not exist in the image.
    prefetching_files.append("/a/b/c/d")
    prefetching_files.append(os.path.join("/", "f/g/h/"))

    specified_dirs = " ".join([os.path.join("/", d) for d in prefetching_files])

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.prefetch_files(specified_dirs).mount()
    wg = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())

    nc = NydusAPIClient(rafs.get_apisock())
    wg.setup_workload_generator()
    blobcache_metrics = nc.get_blobcache_metrics()
    wg.torture_read(5, 10)

    # Wait until all prefetch workers have exited.
    while blobcache_metrics["prefetch_workers"] != 0:
        time.sleep(0.5)
        blobcache_metrics = nc.get_blobcache_metrics()

    # Once prefetch has finished, reads should be served from the blobcache and
    # the backend read amount should stay flat.
    begin = nc.get_backend_metrics()["read_amount_total"]
    time.sleep(1)
    end = nc.get_backend_metrics()["read_amount_total"]
    assert end == begin

    wg.finish_torture_read()
def test_prefetch_with_cache(
    nydus_anchor,
    nydus_scratch_image: RafsImage,
    rafs_conf: RafsConf,
    thread_cnt,
    compressor,
    is_cache_compressed,
):
    """
    title: Prefetch from various backends
    description:
        - Enable the rafs blobcache, as it is disabled by default.
    pass_criteria:
        - Rafs can be mounted.
        - Rafs can be unmounted.
    """
    rafs_conf.enable_rafs_blobcache(is_compressed=is_cache_compressed)
    rafs_conf.set_rafs_backend(Backend.BACKEND_PROXY, prefix="object_prefix/")
    rafs_conf.enable_fs_prefetch(threads_count=4, bandwidth_rate=Size(40, Unit.MB).B)
    rafs_conf.dump_rafs_conf()

    dist = Distributor(nydus_scratch_image.rootfs(), 4, 4)
    dist.generate_tree()
    dist.put_directories(20)
    dist.put_multiple_files(40, Size(3, Unit.MB))
    dist.put_hardlinks(6)
    dist.put_multiple_chinese_files(random.randint(20, 28), Size(20, Unit.KB))

    nydus_scratch_image.set_backend(
        Backend.BACKEND_PROXY, prefix="object_prefix/"
    ).create_image(
        compressor=compressor,
        readahead_policy="fs",
        readahead_files="/".encode(),
    )

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.thread_num(4).mount()

    nc = NydusAPIClient(rafs.get_apisock())
    workload_gen = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())
    m = nc.get_blobcache_metrics()
    time.sleep(0.3)
    assert m["prefetch_data_amount"] != 0

    workload_gen.setup_workload_generator()
    workload_gen.torture_read(thread_cnt, 10)

    assert NydusAnchor.check_nydusd_health()

    workload_gen.finish_torture_read()
    assert not workload_gen.io_error

    # Checking the mount state also verifies that nydusd has not crashed.
    assert rafs.is_mounted()
    rafs.umount()
def test_blobcache(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    nydus_image: RafsImage,
    nydus_scratch_parent_image: RafsImage,
    thread_cnt,
    io_duration,
):
    dist_parent = Distributor(nydus_scratch_parent_image.rootfs(), 6, 4)
    dist_parent.generate_tree()
    dist_parent.put_multiple_files(20, Size(4, Unit.KB))

    hint_files_parent = [os.path.join("/", p) for p in dist_parent.files[-20:]]
    hint_files_parent = "\n".join(hint_files_parent[-1:])

    nydus_scratch_parent_image.set_backend(Backend.OSS).create_image()
    # shutil.rmtree(nydus_scratch_parent_image.rootfs())
    nydus_image.set_backend(Backend.OSS).create_image(
        readahead_policy="fs",
        parent_image=nydus_scratch_parent_image,
        readahead_files=hint_files_parent.encode(),
    )

    nydus_anchor.mount_overlayfs(
        [nydus_image.rootfs(), nydus_scratch_parent_image.rootfs()]
    )

    rafs_conf.enable_rafs_blobcache().set_rafs_backend(Backend.OSS)
    rafs_conf.enable_fs_prefetch()
    rafs_conf.dump_rafs_conf()

    rafs = RafsMount(nydus_anchor, nydus_image, rafs_conf)
    rafs.thread_num(4).mount()

    nc = NydusAPIClient(rafs.get_apisock())
    m = nc.get_blobcache_metrics()
    # TODO: Open this check when prefetch is fixed.
    time.sleep(1)
    assert m["prefetch_data_amount"] != 0

    wg = WorkloadGen(nydus_anchor.mount_point, nydus_anchor.overlayfs)
    wg.setup_workload_generator()
    wg.torture_read(thread_cnt, io_duration)
    wg.finish_torture_read()
def test_prefetch_with_cache(
    nydus_anchor,
    nydus_scratch_image: RafsImage,
    rafs_conf: RafsConf,
    thread_cnt,
    compressor,
    is_cache_compressed,
    converter,
    items,
):
    """
    title: Prefetch from various backends
    description:
        - Enable the rafs blobcache, as it is disabled by default.
    pass_criteria:
        - Rafs can be mounted.
        - Rafs can be unmounted.
    """
    dist = Distributor(nydus_scratch_image.rootfs(), 4, 4)
    dist.generate_tree()
    dist.put_directories(20)
    dist.put_multiple_files(40, Size(3, Unit.MB))
    dist.put_multiple_files(10, Size(5, Unit.MB))
    dist.put_hardlinks(6)
    dist.put_multiple_chinese_files(random.randint(20, 28), Size(20, Unit.KB))

    nydus_scratch_image.set_backend(Backend.LOCALFS).create_image(
        image_bin=converter,
        compressor=compressor,
        readahead_policy="fs",
        readahead_files="/".encode(),
    )

    rafs_conf.enable_rafs_blobcache(
        is_compressed=is_cache_compressed
    ).enable_fs_prefetch()
    rafs_conf.set_rafs_backend(Backend.LOCALFS, image=nydus_scratch_image)

    # Apply any extra RafsConf tweaks requested via the parametrized `items`.
    for i in items:
        item = RafsConf.__dict__[i]
        item(rafs_conf)

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.thread_num(6).mount()

    nc = NydusAPIClient(rafs.get_apisock())
    workload_gen = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())
    m = nc.get_blobcache_metrics()
    time.sleep(0.3)
    assert m["prefetch_data_amount"] != 0

    workload_gen.verify_entire_fs()
    workload_gen.setup_workload_generator()
    workload_gen.torture_read(thread_cnt, 6)

    assert NydusAnchor.check_nydusd_health()

    workload_gen.finish_torture_read()
    assert not workload_gen.io_error