def test_prefetch_with_cache(
    nydus_anchor,
    nydus_scratch_image: RafsImage,
    rafs_conf: RafsConf,
    thread_cnt,
    compressor,
    is_cache_compressed,
):
    """
    title: Prefetch from various backends
    description:
      - Enable rafs backend blob cache, as it is disabled by default
    pass_criteria:
      - Rafs can be mounted.
      - Rafs can be unmounted.
    """
    rafs_conf.enable_rafs_blobcache(is_compressed=is_cache_compressed)
    rafs_conf.set_rafs_backend(Backend.BACKEND_PROXY, prefix="object_prefix/")
    rafs_conf.enable_fs_prefetch(threads_count=4, bandwidth_rate=Size(40, Unit.MB).B)
    rafs_conf.dump_rafs_conf()

    dist = Distributor(nydus_scratch_image.rootfs(), 4, 4)
    dist.generate_tree()
    dist.put_directories(20)
    dist.put_multiple_files(40, Size(3, Unit.MB))
    dist.put_hardlinks(6)
    dist.put_multiple_chinese_files(random.randint(20, 28), Size(20, Unit.KB))

    nydus_scratch_image.set_backend(
        Backend.BACKEND_PROXY, prefix="object_prefix/"
    ).create_image(
        compressor=compressor,
        readahead_policy="fs",
        readahead_files="/".encode(),
    )

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.thread_num(4).mount()

    nc = NydusAPIClient(rafs.get_apisock())
    workload_gen = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())
    m = nc.get_blobcache_metrics()
    time.sleep(0.3)
    assert m["prefetch_data_amount"] != 0

    workload_gen.setup_workload_generator()
    workload_gen.torture_read(thread_cnt, 10)

    assert NydusAnchor.check_nydusd_health()

    workload_gen.finish_torture_read()
    assert not workload_gen.io_error

    # Checking that rafs is still mounted also tells us whether nydusd has crashed.
    assert rafs.is_mounted()
    rafs.umount()
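
# thread_cnt, compressor and is_cache_compressed above are expected to be supplied by
# pytest parametrization rather than by fixtures. A minimal sketch of such a
# parametrization is below, assuming typical nydus compressor names; the value lists
# and the sketch test itself are illustrative assumptions, not taken from this suite.
import pytest


@pytest.mark.parametrize("thread_cnt", [4, 8])
@pytest.mark.parametrize("compressor", ["lz4_block", "none"])
@pytest.mark.parametrize("is_cache_compressed", [False, True])
def test_prefetch_parametrize_sketch(thread_cnt, compressor, is_cache_compressed):
    # Placeholder body: a real test would mount rafs with these knobs,
    # as test_prefetch_with_cache above does.
    assert thread_cnt > 0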
def test_prefetch_without_cache(
    nydus_anchor: NydusAnchor, nydus_scratch_image: RafsImage, rafs_conf: RafsConf
):
    """Files prefetch test

    1. relative hinted prefetch files
    2. absolute hinted prefetch files
    3. source rootfs root dir.
    """
    rafs_conf.enable_fs_prefetch().set_rafs_backend(Backend.BACKEND_PROXY)
    rafs_conf.dump_rafs_conf()

    dist = Distributor(nydus_scratch_image.rootfs(), 4, 4)
    dist.generate_tree()
    dist.put_directories(20)
    dist.put_multiple_files(40, Size(8, Unit.KB))
    dist.put_hardlinks(6)
    dist.put_multiple_chinese_files(random.randint(20, 28), Size(20, Unit.KB))

    hint_files = ["/"]
    hint_files.extend(dist.files)
    hint_files.extend(dist.dirs)
    hint_files.extend(dist.symlinks)
    hint_files.extend(dist.hardlinks)

    hint_files = [os.path.join("/", p) for p in hint_files]
    hint_files = "\n".join(hint_files)

    nydus_scratch_image.set_backend(Backend.BACKEND_PROXY).create_image(
        readahead_policy="fs", readahead_files=hint_files.encode()
    )

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.mount()
    assert rafs.is_mounted()

    workload_gen = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())

    # TODO: Run several parallel read workers against the mount_point
    workload_gen.setup_workload_generator()
    workload_gen.torture_read(8, 5)
    workload_gen.finish_torture_read()

    assert NydusAnchor.check_nydusd_health()
    assert not workload_gen.io_error
    assert rafs.is_mounted()
    rafs.umount()
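
# For reference, the hint list built above is handed to create_image(readahead_files=...)
# as newline-separated, absolute paths encoded to bytes. A minimal, self-contained
# sketch of that construction; the helper name and example entries are made up for
# illustration and are not part of the suite.
import os


def build_prefetch_hint(paths):
    """Normalize a list of paths into the newline-separated, absolute-path
    byte string used as the readahead_files hint above."""
    return "\n".join(os.path.join("/", p) for p in paths).encode()


# e.g. build_prefetch_hint(["/", "dir_a/file_1", "dir_b"])
#      == b"/\n/dir_a/file_1\n/dir_b"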
def test_hardlink(nydus_anchor: NydusAnchor, nydus_scratch_image, rafs_conf: RafsConf):
    """Verify that hardlinks created in the source rootfs are preserved in the mounted rafs."""
    dist = Distributor(nydus_scratch_image.rootfs(), 8, 6)
    dist.generate_tree()

    hardlink_verifier = verifier.HardlinkVerifier(nydus_scratch_image.rootfs(), dist)
    hardlink_verifier.scratch()

    nydus_scratch_image.set_backend(Backend.BACKEND_PROXY).create_image()
    rafs_conf.set_rafs_backend(Backend.BACKEND_PROXY)

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.mount()

    wg = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())

    hardlink_verifier.verify(nydus_anchor.mount_point, nydus_scratch_image.rootfs())

    wg.setup_workload_generator()
    wg.io_read(3)

    nydus_anchor.check_nydusd_health()
    assert not wg.io_error
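
# verifier.HardlinkVerifier is provided by the test harness and its internals are not
# shown here. A minimal sketch of the invariant such a verifier would check inside the
# mounted rafs, assuming the hardlinked paths are known; the helper below is
# hypothetical and not part of this suite.
import os


def assert_hardlinked(mount_point, rel_path_a, rel_path_b):
    """Both paths must resolve to the same inode on the same device and report a
    link count of at least 2, i.e. the hardlink survived image conversion."""
    st_a = os.stat(os.path.join(mount_point, rel_path_a))
    st_b = os.stat(os.path.join(mount_point, rel_path_b))
    assert (st_a.st_ino, st_a.st_dev) == (st_b.st_ino, st_b.st_dev)
    assert st_a.st_nlink >= 2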
def test_prefetch_with_cache(
    nydus_anchor,
    nydus_scratch_image: RafsImage,
    rafs_conf: RafsConf,
    thread_cnt,
    compressor,
    is_cache_compressed,
    converter,
    items,
):
    """
    title: Prefetch from various backends
    description:
      - Enable rafs backend blob cache, as it is disabled by default
    pass_criteria:
      - Rafs can be mounted.
      - Rafs can be unmounted.
    """
    dist = Distributor(nydus_scratch_image.rootfs(), 4, 4)
    dist.generate_tree()
    dist.put_directories(20)
    dist.put_multiple_files(40, Size(3, Unit.MB))
    dist.put_multiple_files(10, Size(5, Unit.MB))
    dist.put_hardlinks(6)
    dist.put_multiple_chinese_files(random.randint(20, 28), Size(20, Unit.KB))

    nydus_scratch_image.set_backend(Backend.LOCALFS).create_image(
        image_bin=converter,
        compressor=compressor,
        readahead_policy="fs",
        readahead_files="/".encode(),
    )

    rafs_conf.enable_rafs_blobcache(
        is_compressed=is_cache_compressed
    ).enable_fs_prefetch()
    rafs_conf.set_rafs_backend(Backend.LOCALFS, image=nydus_scratch_image)

    # Apply any extra RafsConf tweaks requested by the test, looked up by method name.
    for i in items:
        item = RafsConf.__dict__[i]
        item(rafs_conf)

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.thread_num(6).mount()

    nc = NydusAPIClient(rafs.get_apisock())
    workload_gen = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())
    m = nc.get_blobcache_metrics()
    time.sleep(0.3)
    assert m["prefetch_data_amount"] != 0

    workload_gen.verify_entire_fs()
    workload_gen.setup_workload_generator()
    workload_gen.torture_read(thread_cnt, 6)

    assert NydusAnchor.check_nydusd_health()

    workload_gen.finish_torture_read()
    assert not workload_gen.io_error
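
# The `items` loop above enables extra RafsConf features by looking methods up by name
# in the class dict and calling them with the config instance. A self-contained sketch
# of that dispatch idiom with a stand-in class; ExampleConf and its method names are
# hypothetical and not part of RafsConf.
class ExampleConf:
    def __init__(self):
        self.flags = set()

    def enable_feature_a(self):
        self.flags.add("feature_a")

    def enable_feature_b(self):
        self.flags.add("feature_b")


_conf = ExampleConf()
for _name in ["enable_feature_a", "enable_feature_b"]:
    # Unbound function fetched from the class dict, then called with the instance,
    # exactly like `RafsConf.__dict__[i](rafs_conf)` above.
    ExampleConf.__dict__[_name](_conf)

assert _conf.flags == {"feature_a", "feature_b"}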