def test_build_image(nydus_anchor, nydus_scratch_image: RafsImage, rafs_conf: RafsConf):
    """
    title: Build nydus image
    description: Build nydus image from rootfs generating proper bootstrap and blob
    pass_criteria:
      - Image can be successfully built and mounted
      - Rafs can be unmounted and do a small amount of read io and attr operation
      - Try let image builder upload blob itself.
    """
    # Populate the scratch rootfs with a varied tree to exercise the builder.
    dist = Distributor(nydus_scratch_image.rootfs(), 80, 1)
    dist.generate_tree()
    dist.put_directories(100)
    dist.put_hardlinks(90)
    dist.put_symlinks(200)
    dist.put_multiple_files(random.randint(20, 28), Size(10, Unit.MB))
    dist.put_multiple_chinese_files(random.randint(20, 28), Size(20, Unit.KB))

    # A whiteout file must survive image creation and verification.
    Whiteout().whiteout_one_file(nydus_scratch_image.rootfs(), "i/am/troublemaker/foo")

    nydus_scratch_image.set_backend(Backend.BACKEND_PROXY).create_image()

    rafs_conf.set_rafs_backend(backend_type=Backend.BACKEND_PROXY)
    rafs_conf.dump_rafs_conf()

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    wg = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())
    rafs.mount()
    assert wg.verify_entire_fs()
    rafs.umount()
def verify(self, target_dir, source_dir):
    """
    Check every hardlink group under ``target_dir``: all members must share
    the same md5 digest and identical stat results.  The designated inner
    hardlink must have a link count of exactly 1.

    ``source_dir`` is kept for interface compatibility; it is not read here.
    """
    for group in self.dist.hardlinks.values():
        members = iter(group)
        try:
            first = next(members)
        except StopIteration:
            # Empty group — nothing to compare.
            continue

        prev_path = os.path.join(target_dir, first)
        prev_md5 = WorkloadGen.calc_file_md5(prev_path)
        prev_stat = os.stat(prev_path)

        for member in members:
            cur_path = os.path.join(target_dir, member)
            cur_md5 = WorkloadGen.calc_file_md5(cur_path)
            cur_stat = os.stat(cur_path)
            assert prev_md5 == cur_md5
            assert (
                prev_stat == cur_stat
            ), f"last hardlink path {prev_path}, cur hardlink path {cur_path}"
            prev_md5, prev_stat, prev_path = cur_md5, cur_stat, cur_path

    with pushd(target_dir):
        assert (
            os.stat(os.path.join(target_dir, self.inner_hardlink_name)).st_nlink == 1
        )
def test_layered_localfs(nydus_anchor, nydus_scratch_image: RafsImage, nydus_scratch_parent_image: RafsImage):
    """Build a two-layer image on the localfs backend and verify it via rafs."""
    # Parent layer first, then the upper layer referencing it.
    nydus_scratch_parent_image.set_backend(Backend.LOCALFS, blob_dir=()).create_image()
    nydus_scratch_image.set_backend(Backend.LOCALFS, blob_dir=()).create_image(
        parent_image=nydus_scratch_parent_image
    )

    # Overlayfs over both rootfs dirs serves as ground truth.
    nydus_anchor.mount_overlayfs(
        [nydus_scratch_image.rootfs(), nydus_scratch_parent_image.rootfs()]
    )

    conf = RafsConf(nydus_anchor).set_rafs_backend(Backend.LOCALFS)
    mounted = RafsMount(nydus_anchor, nydus_scratch_image, conf)
    mounted.mount()

    gen = WorkloadGen(nydus_anchor.mount_point, nydus_anchor.overlayfs)
    gen.setup_workload_generator()
    assert gen.verify_entire_fs()
    gen.torture_read(5, 4)
    gen.finish_torture_read()
def test_prefetch_with_cache(
    nydus_anchor: NydusAnchor,
    nydus_scratch_image: RafsImage,
    nydus_scratch_parent_image: RafsImage,
    rafs_conf: RafsConf,
):
    """Layered image with fs prefetch, blobcache and validation enabled."""
    parent_rootfs = nydus_scratch_parent_image.rootfs()
    upper_rootfs = nydus_scratch_image.rootfs()

    rafs_conf.enable_validation()
    rafs_conf.set_rafs_backend(Backend.OSS)
    rafs_conf.enable_rafs_blobcache()
    rafs_conf.enable_fs_prefetch(threads_count=4, merging_size=512 * 1024)
    rafs_conf.dump_rafs_conf()

    # Parent layer content.
    parent_dist = Distributor(parent_rootfs, 6, 4)
    parent_dist.generate_tree()
    parent_dist.put_directories(20)
    parent_dist.put_multiple_files(100, Size(64, Unit.KB))
    parent_dist.put_symlinks(30)
    parent_dist.put_hardlinks(20)

    # Upper layer content.
    upper_dist = Distributor(upper_rootfs, 3, 8)
    upper_dist.generate_tree()
    upper_dist.put_multiple_files(27, Size(3, Unit.MB))
    upper_dist.put_symlinks(5)

    # NOTE: hinting individual parent files was tried before; the parent now
    # simply prefetches everything ("/").
    nydus_scratch_parent_image.set_backend(Backend.OSS).create_image(
        readahead_policy="fs", readahead_files="/".encode()
    )

    hinted = upper_dist.put_multiple_files(1000, Size(8, Unit.KB))
    hinted.extend(upper_dist.put_multiple_empty_files(200))
    hint_blob = "\n".join(os.path.join("/", p) for p in hinted)

    nydus_scratch_image.set_backend(Backend.OSS).create_image(
        parent_image=nydus_scratch_parent_image,
        readahead_policy="fs",
        readahead_files=hint_blob.encode(),
    )

    nydus_anchor.mount_overlayfs(
        [nydus_scratch_image.rootfs(), nydus_scratch_parent_image.rootfs()]
    )

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.thread_num(5).mount()

    gen = WorkloadGen(nydus_anchor.mount_point, nydus_anchor.overlayfs)
    gen.setup_workload_generator()
    assert gen.verify_entire_fs()
    gen.torture_read(5, 20)
    gen.finish_torture_read()
def test_file_tail(nydus_anchor: NydusAnchor, nydus_scratch_image: RafsImage, backend):
    """
    description: Read data from file tail
        - Create several files of different sizes
        - Punch hole to each file of which some should have hole tail
        - Create rafs image from test scratch directory.
        - Mount rafs
        - Do some test.
    """
    sizes = [
        Size(1, Unit.KB),
        Size(6, Unit.KB),
        Size(2, Unit.MB),
        Size(10034, Unit.KB),
    ]

    created = []
    dist = Distributor(nydus_anchor.scratch_dir, 2, 2)
    dist.generate_tree()

    for sz in sizes:
        name = dist.put_single_file(sz)
        created.append(name)
        # Punch a hole near the end so each file gets a sparse tail.
        with utils.pushd(nydus_anchor.scratch_dir):
            with open(name, "a+b") as fp:
                fallocate(
                    fp,
                    sz.B - 500,
                    1000,
                    mode=FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                )

    nydus_scratch_image.set_backend(backend).create_image()

    conf = RafsConf(nydus_anchor, nydus_scratch_image)
    conf.set_rafs_backend(backend, image=nydus_scratch_image)
    rafs = RafsMount(nydus_anchor, nydus_scratch_image, conf)
    rafs.mount()

    with utils.pushd(nydus_anchor.mount_point):
        for name in created:
            with open(name, "rb") as fp:
                length = os.stat(name).st_size
                fp.seek(length - 300)
                # A read crossing EOF only returns the remaining 300 bytes.
                assert len(fp.read(1000)) == 300

    wg = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())
    for name in created:
        wg.verify_single_file(os.path.join(nydus_anchor.mount_point, name))

    assert wg.io_error == False
def test_read_cache(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    nydus_image: RafsImage,
    nydus_parent_image: RafsImage,
):
    """Torture-read a layered image with blobcache enabled, then verify."""
    nydus_parent_image.set_backend(Backend.OSS).create_image()
    nydus_image.set_backend(Backend.OSS).create_image(parent_image=nydus_parent_image)

    nydus_anchor.mount_overlayfs([nydus_image.rootfs(), nydus_parent_image.rootfs()])

    rafs_conf.enable_rafs_blobcache().set_rafs_backend(Backend.OSS)
    rafs_conf.dump_rafs_conf()

    mounted = RafsMount(nydus_anchor, nydus_image, rafs_conf)
    mounted.mount()

    gen = WorkloadGen(nydus_anchor.mount_point, nydus_anchor.overlayfs)
    gen.setup_workload_generator()
    gen.torture_read(12, 10)
    gen.finish_torture_read()
    assert gen.verify_entire_fs()
def test_layered_rebuild(
    nydus_anchor,
    nydus_scratch_image: RafsImage,
    nydus_scratch_parent_image: RafsImage,
    rafs_conf: RafsConf,
    backend,
):
    """
    title: Layered image rebuild
    description:
      - Parent and upper have files whose contents are exactly the same.
      - Use files stats to check if file is overlayed.
      - Files with the same name but different modes.
      - Files with xattr in parent should be shadowed.
    pass_criteria:
      - Mount successfully.
      - No data corruption.
    """
    rafs_conf.set_rafs_backend(backend)
    rafs_conf.enable_rafs_blobcache()
    rafs_conf.dump_rafs_conf()

    parent_rootfs = nydus_scratch_parent_image.rootfs()
    upper_rootfs = nydus_scratch_image.rootfs()

    nydus_anchor.mount_overlayfs(
        [nydus_scratch_image.rootfs(), nydus_scratch_parent_image.rootfs()]
    )

    # Files present in BOTH layers — the upper copies must shadow the parent.
    parent_dist = Distributor(parent_rootfs, 6, 4)
    parent_dist.generate_tree()
    shadowed = []
    shadowed += parent_dist.put_multiple_files(100, Size(64, Unit.KB))
    shadowed += parent_dist.put_symlinks(30)
    shadowed += parent_dist.put_hardlinks(30)

    xattr_verifier = XattrVerifier(parent_rootfs, parent_dist)
    Whiteout.mirror_files(shadowed, parent_rootfs, upper_rootfs)
    xattr_verifier.scratch(parent_rootfs)

    nydus_scratch_parent_image.set_backend(backend).create_image()
    nydus_scratch_image.set_backend(backend).create_image(
        parent_image=nydus_scratch_parent_image
    )

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.mount()

    gen = WorkloadGen(nydus_anchor.mount_point, nydus_anchor.overlayfs)
    gen.setup_workload_generator()
    xattr_verifier.verify(nydus_anchor.mount_point)
    assert gen.verify_entire_fs()
    gen.torture_read(5, 4)
    gen.finish_torture_read()
def test_access_pattern(nydus_anchor, nydus_image, rafs_conf: RafsConf):
    """Verify access-pattern metrics are recorded and queryable via the API."""
    rafs_id = "/"
    rafs_conf.enable_access_pattern().set_rafs_backend(Backend.OSS)
    rafs_conf.dump_rafs_conf()
    nydus_image.set_backend(Backend.OSS).create_image()

    rafs = RafsMount(nydus_anchor, nydus_image, rafs_conf)
    rafs.mount()

    nc = NydusAPIClient(rafs.get_apisock())
    wg = WorkloadGen(nydus_anchor.mount_point, nydus_image.rootfs())
    wg.setup_workload_generator()
    wg.torture_read(4, 8)
    # Let some IO accumulate before sampling metrics.
    duration = 4
    while duration:
        time.sleep(1)
        duration -= 1

    global_metrics = nc.get_global_metrics()
    # BUG FIX: this comparison was a bare no-op expression; it must assert.
    assert global_metrics["access_pattern_enabled"] == True

    patterns = nc.get_access_patterns(rafs_id)
    assert len(patterns) != 0

    patterns = nc.get_access_patterns()
    assert len(patterns) != 0

    # Querying a nonexistent rafs id must not crash the daemon.
    nc.get_access_patterns("poison")

    wg.finish_torture_read()
def test_api_mount_with_prefetch(nydus_anchor, nydus_image: RafsImage, rafs_conf: RafsConf):
    """Mount a pseudo-fs through the API socket with prefetch hints enabled."""
    nydus_image.set_backend(Backend.OSS).create_image()
    hint_files = ["/"]

    rafs = RafsMount(nydus_anchor, None, None, with_defaults=False)

    # Prefetch must enable blobcache
    rafs_conf.enable_rafs_blobcache()
    rafs_conf.set_rafs_backend(Backend.OSS)
    rafs_conf.enable_fs_prefetch(threads_count=4)
    rafs_conf.dump_rafs_conf()

    rafs.set_mountpoint(nydus_anchor.mount_point).apisock("api_sock").mount(
        dump_config=False,
    )

    nc = NydusAPIClient(rafs.get_apisock())
    nc.pseudo_fs_mount(
        nydus_image.bootstrap_path,
        "/pseudo_fs_1",
        rafs_conf.path(),
        hint_files,
        "rafs",
    )

    # Only one rafs mountpoint exists, so whether set rafs id or not is not important.
    m = nc.get_blobcache_metrics()
    # TODO this won't pass
    # assert m["prefetch_data_amount"] != 0

    gen = WorkloadGen(
        os.path.join(nydus_anchor.mount_point, "pseudo_fs_1"), nydus_image.rootfs()
    )
    gen.setup_workload_generator()
    gen.torture_read(4, 8)
    gen.finish_torture_read()

    m = nc.get_blobcache_metrics("/pseudo_fs_1")
def test_meta(nydus_anchor: NydusAnchor, rafs_conf: RafsConf, nydus_scratch_image: RafsImage):
    """Exercise metadata handling: xattrs and symlinks survive image build/mount."""
    anchor = nydus_anchor
    rafs_conf.set_rafs_backend(Backend.BACKEND_PROXY).enable_rafs_blobcache()
    rafs_conf.dump_rafs_conf()

    dist = Distributor(nydus_scratch_image.rootfs(), 8, 5)
    dist.generate_tree()

    # Do some meta operations on scratch dir before creating rafs image file.
    # Use scratch dir as image source dir as we just prepared test meta into it.
    xattr_verifier = verifier.XattrVerifier(anchor.mount_point, dist)
    xattr_verifier.scratch(nydus_scratch_image.rootfs())

    symlink_verifier = verifier.SymlinkVerifier(anchor.mount_point, dist)
    symlink_verifier.scratch()

    nydus_scratch_image.set_backend(Backend.BACKEND_PROXY).create_image()

    rafs = RafsMount(anchor, nydus_scratch_image, rafs_conf)
    rafs.thread_num(4).mount()
    assert rafs.is_mounted()

    xattr_verifier.verify(anchor.mount_point)
    symlink_verifier.verify(anchor.mount_point, nydus_scratch_image.rootfs())

    gen = WorkloadGen(anchor.mount_point, nydus_scratch_image.rootfs())
    gen.setup_workload_generator()
    gen.torture_read(10, 3)
    gen.finish_torture_read()

    assert gen.io_error == False
    assert anchor.check_nydusd_health()
def test_large_file(nydus_anchor, compressor, backend, amplified_size):
    """Build an image holding multi-MB files and torture-read it with IO amplification."""
    _tmp_dir = tempfile.TemporaryDirectory(dir=nydus_anchor.workspace)
    rootfs = _tmp_dir.name

    dist = Distributor(rootfs, 3, 3)
    dist.generate_tree()
    dist.put_single_file(Size(20, Unit.MB))
    dist.put_single_file(Size(10891, Unit.KB))
    dist.put_multiple_files(10, Size(2, Unit.MB))
    dist.put_multiple_files(10, Size(4, Unit.MB))

    image = RafsImage(nydus_anchor, rootfs, "bs_large", "blob_large")
    image.set_backend(backend).create_image(compressor=compressor)

    conf = (
        RafsConf(nydus_anchor, image)
        .enable_rafs_blobcache()
        .amplify_io(amplified_size)
        .set_rafs_backend(backend, image=image)
    )

    rafs = RafsMount(nydus_anchor, image, conf)
    rafs.thread_num(4).mount()

    gen = WorkloadGen(nydus_anchor.mount_point, rootfs)
    gen.setup_workload_generator()
    gen.torture_read(8, 5)
    gen.finish_torture_read()

    assert not gen.io_error
    rafs.umount()
    image.clean_up()
def test_limited_mem(nydus_anchor, rafs_conf, nydus_image):
    """
    description: Run nydusd in a memory limited environment.
      - Use `ulimit` to limit virtual memory nydusd can use.
      - Mount rafs
      - Torture rafs
    """
    rafs_conf.enable_rafs_blobcache()
    rafs_conf.dump_rafs_conf()

    rafs = RafsMount(nydus_anchor, nydus_image, rafs_conf)
    rafs.mount(limited_mem=Size(3, Unit.GB))

    gen = WorkloadGen(nydus_anchor.mount_point, nydus_image.rootfs())
    gen.setup_workload_generator()
    gen.torture_read(8, 10)

    nydus_anchor.start_stats_checker()
    gen.finish_torture_read()
    nydus_anchor.stop_stats_checker()

    assert gen.io_error == False
    assert nydus_anchor.check_nydusd_health()
def test_passthough_fs(nydus_anchor, nydus_image, rafs_conf):
    """Mount nydusd in passthrough (shared-dir) mode and verify the tree."""
    nydus_image.set_backend(Backend.BACKEND_PROXY).create_image()

    rafs = RafsMount(nydus_anchor, None, rafs_conf, with_defaults=False)
    rafs.shared_dir(nydus_image.rootfs()).set_mountpoint(
        nydus_anchor.mount_point
    ).apisock("api_sock").mount()

    gen = WorkloadGen(nydus_anchor.mount_point, nydus_image.rootfs())
    gen.setup_workload_generator()
    gen.torture_read(8, 5)
    gen.finish_torture_read()
    assert gen.verify_entire_fs()
def test_specified_prefetch(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    nydus_scratch_image: RafsImage,
    backend,
):
    """
    description:
        Nydusd can have a list including files and directories input when started.
        Then it can prefetch files from backend per as to the list.
    """
    rafs_conf.set_rafs_backend(backend)
    rafs_conf.enable_fs_prefetch(prefetch_all=True)
    rafs_conf.enable_rafs_blobcache()
    rafs_conf.dump_rafs_conf()

    dist = Distributor(nydus_scratch_image.rootfs(), 8, 2)
    dist.generate_tree()
    dirs = dist.put_directories(20)
    dist.put_multiple_files(100, Size(64, Unit.KB))
    dist.put_symlinks(30)
    dist.put_hardlinks(20)
    dist.put_multiple_files(40, Size(64, Unit.KB))
    dist.put_single_file(Size(3, Unit.MB), name="test")

    nydus_scratch_image.set_backend(backend).create_image()

    # Mixed prefetch list plus a couple of bogus paths as fuzz input.
    targets = dirs
    targets += dist.files[:-10]
    targets += dist.dirs[:-5]
    targets += dist.symlinks[:-10]
    targets.append("/a/b/c/d")
    targets.append(os.path.join("/", "f/g/h/"))
    specified_dirs = " ".join(os.path.join("/", t) for t in targets)

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.prefetch_files(specified_dirs).mount()

    wg = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())
    nc = NydusAPIClient(rafs.get_apisock())
    wg.setup_workload_generator()

    blobcache_metrics = nc.get_blobcache_metrics()
    wg.torture_read(5, 10)

    # Wait until all prefetch workers drain.
    while blobcache_metrics["prefetch_workers"] != 0:
        time.sleep(0.5)
        blobcache_metrics = nc.get_blobcache_metrics()

    # With everything prefetched, the backend should see no further reads.
    begin = nc.get_backend_metrics()["read_amount_total"]
    time.sleep(1)
    end = nc.get_backend_metrics()["read_amount_total"]
    assert end == begin

    wg.finish_torture_read()
def test_prefetch_with_cache(
    nydus_anchor,
    nydus_scratch_image: RafsImage,
    rafs_conf: RafsConf,
    thread_cnt,
    compressor,
    is_cache_compressed,
):
    """
    title: Prefetch from various backend
    description:
      - Enable rafs backend blob cache, as it is disabled by default
    pass_criteria:
      - Rafs can be mounted.
      - Rafs can be unmounted.
    """
    rafs_conf.enable_rafs_blobcache(is_compressed=is_cache_compressed)
    rafs_conf.set_rafs_backend(Backend.BACKEND_PROXY, prefix="object_prefix/")
    rafs_conf.enable_fs_prefetch(threads_count=4, bandwidth_rate=Size(40, Unit.MB).B)
    rafs_conf.dump_rafs_conf()

    dist = Distributor(nydus_scratch_image.rootfs(), 4, 4)
    dist.generate_tree()
    dist.put_directories(20)
    dist.put_multiple_files(40, Size(3, Unit.MB))
    dist.put_hardlinks(6)
    dist.put_multiple_chinese_files(random.randint(20, 28), Size(20, Unit.KB))

    nydus_scratch_image.set_backend(
        Backend.BACKEND_PROXY, prefix="object_prefix/"
    ).create_image(
        compressor=compressor,
        readahead_policy="fs",
        readahead_files="/".encode(),
    )

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.thread_num(4).mount()
    nc = NydusAPIClient(rafs.get_apisock())
    workload_gen = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())

    # BUG FIX: metrics used to be fetched *before* the sleep, so the assertion
    # raced against prefetch startup. Give prefetch a moment, then sample.
    time.sleep(0.3)
    m = nc.get_blobcache_metrics()
    assert m["prefetch_data_amount"] != 0

    workload_gen.setup_workload_generator()
    workload_gen.torture_read(thread_cnt, 10)

    assert NydusAnchor.check_nydusd_health()

    workload_gen.finish_torture_read()
    assert not workload_gen.io_error

    # In this way, we can check if nydusd is crashed.
    assert rafs.is_mounted()
    rafs.umount()
def test_basic_conversion(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    source,
    fs_version,
    local_registry,
    nydusify_converter,
):
    """
    No need to locate where bootstrap is as we can directly pull it from registry
    """
    converter = Nydusify(nydus_anchor)
    time.sleep(1)
    converter.docker_v2().enable_multiplatfrom(False).convert(
        source, fs_version=fs_version
    )
    assert converter.locate_bootstrap() is not None

    pulled_bootstrap = converter.pull_bootstrap(
        tempfile.TemporaryDirectory(dir=nydus_anchor.workspace, suffix="bootstrap").name,
        "pulled_bootstrap",
    )

    # Skopeo does not support media type: "application/vnd.oci.image.layer.nydus.blob.v1",
    # So can't download build cache like a oci image.
    layers, base = converter.extract_source_layers_names_and_download()
    nydus_anchor.mount_overlayfs(layers, base)

    converted_layers = converter.extract_converted_layers_names()
    converted_layers.sort()

    rafs_conf.set_rafs_backend(
        Backend.REGISTRY, repo=posixpath.basename(source).split(":")[0]
    )
    rafs_conf.enable_fs_prefetch()
    rafs_conf.enable_rafs_blobcache()
    rafs_conf.dump_rafs_conf()

    # Use `nydus-image inspect` to compare blob table in bootstrap and manifest
    rafs = RafsMount(nydus_anchor, None, rafs_conf)
    gen = WorkloadGen(nydus_anchor.mount_point, nydus_anchor.overlayfs)

    # No need to locate where bootstrap is as we can directly pull it from registry
    rafs.thread_num(6).bootstrap(pulled_bootstrap).prefetch_files("/").mount()

    assert gen.verify_entire_fs()
    gen.setup_workload_generator()
    gen.torture_read(4, 6, verify=True)
    gen.finish_torture_read()
def test_blobcache(
    nydus_anchor: NydusAnchor,
    nydus_image: RafsImage,
    rafs_conf: RafsConf,
    compressor,
    backend,
):
    """
    Allocate a file with local test working directory. Loop the file so to get
    a small file system which is easy to get full. Change blob cache location
    the above test blobdir
    """
    blobdir = "/blobdir"
    blob_backend = "blob_backend"

    # Create a tiny (4 MB) ext4 filesystem backed by a regular file and mount
    # it where the blobcache will live, so it fills up quickly.
    fd = os.open(blob_backend, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
    os.posix_fallocate(fd, 0, 1024 * 1024 * 4)
    os.close(fd)

    utils.execute(["mkfs.ext4", "-F", blob_backend])
    utils.execute(["mount", blob_backend, blobdir])

    rafs_conf.enable_rafs_blobcache()
    rafs_conf.set_rafs_backend(backend)
    rafs_conf.dump_rafs_conf()

    assert len(os.listdir(blobdir)) == 1

    rafs = RafsMount(nydus_anchor, nydus_image, rafs_conf)
    rafs.mount()
    assert rafs.is_mounted()

    gen = WorkloadGen(nydus_anchor.mount_point, nydus_anchor.source_dir)
    gen.setup_workload_generator()
    gen.torture_read(4, 15)

    nydus_anchor.start_stats_checker()
    gen.finish_torture_read()
    nydus_anchor.stop_stats_checker()

    assert len(os.listdir(blobdir)) >= 2

    if gen.io_error:
        warnings.warn(UserWarning("Rafs will return EIO if blobcache file is full"))

    rafs.umount()
    ret, _ = utils.execute(["umount", blobdir])
    assert ret
    os.unlink(blob_backend)
def test_basic(
    nydus_anchor,
    nydus_image: RafsImage,
    io_duration,
    backend,
    rafs_conf: RafsConf,
    fs_version,
):
    """
    title: Basic functionality test
    description: Mount rafs with different mount options
    pass_criteria:
      - Rafs can be mounted.
      - Rafs can be unmounted.
    """
    nydus_image.set_backend(backend, blob_dir=()).create_image(fs_version=fs_version)
    rafs_conf.set_rafs_backend(backend)

    rafs = RafsMount(nydus_anchor, nydus_image, rafs_conf)
    rafs.mount()
    assert rafs.is_mounted()

    gen = WorkloadGen(nydus_anchor.mount_point, nydus_image.rootfs())
    gen.setup_workload_generator()
    gen.io_read(io_duration)

    nydus_anchor.check_nydusd_health()
    assert gen.io_error == False
    assert gen.verify_entire_fs()

    assert rafs.is_mounted()
    rafs.umount()
def test_basic_read(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    nydus_image: RafsImage,
    nydus_parent_image: RafsImage,
):
    """
    title: Build an image from parent image.
    description: Mount rafs to check if can act read correctly.
    """
    nydus_parent_image.set_backend(Backend.OSS).create_image()
    nydus_image.set_backend(Backend.OSS).create_image(parent_image=nydus_parent_image)

    nydus_anchor.mount_overlayfs([nydus_image.rootfs(), nydus_parent_image.rootfs()])

    rafs_conf.enable_rafs_blobcache().set_rafs_backend(Backend.OSS)
    rafs_conf.dump_rafs_conf()

    mounted = RafsMount(nydus_anchor, nydus_image, rafs_conf)
    mounted.mount()

    gen = WorkloadGen(nydus_anchor.mount_point, nydus_anchor.overlayfs)
    gen.setup_workload_generator()
    gen.io_read(5)

    assert gen.verify_entire_fs()
    assert gen.io_error == False
def test_blob_prefetch(nydus_anchor: NydusAnchor, nydus_scratch_image: RafsImage, readahead_policy):
    """
    description:
        For rafs, there are two types of prefetching.
        1. Prefetch files from fs-layer, which means each file is prefetched one by one.
        2. Prefetch directly from backend/blob layer, which means a range will be
           fetched from blob
    """
    # Try to delete any access log since if it present, bootstrap blob prefetch won't work.
    utils.execute("rm -rf *.access", shell=True)

    dist = Distributor(nydus_scratch_image.rootfs(), 8, 2)
    dist.generate_tree()
    dist.put_directories(20)
    dist.put_multiple_files(100, Size(64, Unit.KB))
    dist.put_symlinks(30)
    dist.put_hardlinks(20)
    dist.put_multiple_files(40, Size(64, Unit.KB))

    utils.clean_pagecache()

    # Hint the most recently created files and symlinks for readahead.
    hinted = dist.files[-40:]
    hinted.extend(dist.symlinks[-20:])
    hint_blob = "\n".join(os.path.join("/", p) for p in hinted)

    nydus_scratch_image.set_backend(Backend.LOCALFS).create_image(
        readahead_policy=readahead_policy,
        readahead_files=hint_blob.encode(),
    )

    conf = RafsConf(nydus_anchor, nydus_scratch_image)
    conf.set_rafs_backend(Backend.LOCALFS, image=nydus_scratch_image)
    conf.enable_records_readahead(interval=1)
    conf.dump_rafs_conf()

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, conf)
    with utils.timer("Mount elapse"):
        rafs.thread_num(7).mount()
    assert rafs.is_mounted()

    wg = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())
    # TODO: Run several parallel read workers against the mount_point
    wg.setup_workload_generator()
    wg.torture_read(5, 5)
    wg.finish_torture_read()

    utils.clean_pagecache()
def test_prefetch_without_cache(nydus_anchor: NydusAnchor, nydus_scratch_image: RafsImage, rafs_conf: RafsConf):
    """Files prefetch test

    1. relative hinted prefetch files
    2. absolute hinted prefetch files
    3. source rootfs root dir.
    """
    rafs_conf.enable_fs_prefetch().set_rafs_backend(Backend.BACKEND_PROXY)
    rafs_conf.dump_rafs_conf()

    dist = Distributor(nydus_scratch_image.rootfs(), 4, 4)
    dist.generate_tree()
    dist.put_directories(20)
    dist.put_multiple_files(40, Size(8, Unit.KB))
    dist.put_hardlinks(6)
    dist.put_multiple_chinese_files(random.randint(20, 28), Size(20, Unit.KB))

    # Hint everything: root, files, dirs, symlinks and hardlinks.
    hinted = ["/"]
    hinted.extend(dist.files)
    hinted.extend(dist.dirs)
    hinted.extend(dist.symlinks)
    hinted.extend(dist.hardlinks)
    hint_blob = "\n".join(os.path.join("/", p) for p in hinted)

    nydus_scratch_image.set_backend(Backend.BACKEND_PROXY).create_image(
        readahead_policy="fs", readahead_files=hint_blob.encode()
    )

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.mount()
    assert rafs.is_mounted()

    gen = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())
    # TODO: Run several parallel read workers against the mount_point
    gen.setup_workload_generator()
    gen.torture_read(8, 5)
    gen.finish_torture_read()

    assert NydusAnchor.check_nydusd_health()
    assert not gen.io_error

    assert rafs.is_mounted()
    rafs.umount()
def test_shared_blobcache(nydus_anchor, nydus_image, rafs_conf: RafsConf):
    """
    description: Start more than one nydusd, let them share the same blobcache.
    """
    nydus_image.set_backend(Backend.LOCALFS, blob_dir=()).create_image()
    rafs_conf.enable_rafs_blobcache().set_rafs_backend(Backend.LOCALFS)
    rafs_conf.dump_rafs_conf()

    def build_daemon(mountpoint):
        # Each daemon gets its own api socket and mountpoint but shares cache.
        return (
            RafsMount(nydus_anchor, nydus_image, rafs_conf)
            .apisock(tempfile.NamedTemporaryFile().name)
            .prefetch_files("/")
            .set_mountpoint(mountpoint)
        )

    instances = []
    for idx in range(0, 10):
        mnt = tempfile.TemporaryDirectory(
            dir=nydus_anchor.workspace, suffix="root_" + str(idx)
        )
        daemon = build_daemon(mnt.name)
        daemon.mount(dump_config=False)
        gen = WorkloadGen(mnt.name, nydus_image.rootfs())
        gen.setup_workload_generator()
        instances.append((daemon, gen, mnt))

    for daemon, gen, _ in instances:
        utils.clean_pagecache()
        gen.torture_read(4, 5)

    for _, gen, _ in instances:
        gen.finish_torture_read()

    # Ensure that blob & bitmap files are included in blobcache dir.
    assert len(os.listdir(nydus_anchor.blobcache_dir)) == 2

    for daemon, _, _ in instances:
        daemon.umount()
def test_different_partitions(nydus_anchor: NydusAnchor, rafs_conf):
    """Build parent/upper layers from two separate loop-mounted ext4 partitions."""
    loop_file_1 = tempfile.NamedTemporaryFile(suffix="loop")
    loop_file_2 = tempfile.NamedTemporaryFile(suffix="loop")
    loop_mnt_1 = tempfile.TemporaryDirectory(dir=nydus_anchor.workspace)
    loop_mnt_2 = tempfile.TemporaryDirectory(dir=nydus_anchor.workspace)

    # Two 400 MB ext4 images, loop-mounted as independent partitions.
    os.posix_fallocate(loop_file_1.fileno(), 0, Size(400, Unit.MB).B)
    os.posix_fallocate(loop_file_2.fileno(), 0, Size(400, Unit.MB).B)
    utils.execute(["mkfs.ext4", "-F", loop_file_1.name])
    utils.execute(["mkfs.ext4", "-F", loop_file_2.name])
    utils.execute(["mount", loop_file_1.name, loop_mnt_1.name])
    utils.execute(["mount", loop_file_2.name, loop_mnt_2.name])

    # TODO: Put more special files into
    dist1 = Distributor(loop_mnt_1.name, 5, 7)
    dist1.generate_tree()
    dist1.put_multiple_files(100, Size(12, Unit.KB))

    dist2 = Distributor(loop_mnt_2.name, 5, 7)
    dist2.generate_tree()
    dist2.put_symlinks(20)
    dist2.put_multiple_files(50, Size(12, Unit.KB))

    Whiteout.mirror_files(dist2.files[:20], loop_mnt_2.name, loop_mnt_1.name)

    parent_image = (
        RafsImage(nydus_anchor, loop_mnt_1.name).set_backend(Backend.OSS).create_image()
    )
    image = RafsImage(nydus_anchor, loop_mnt_2.name)
    image.set_backend(Backend.OSS).create_image(parent_image=parent_image)

    rafs_conf.set_rafs_backend(Backend.OSS)
    rafs = RafsMount(nydus_anchor, image, rafs_conf)
    rafs.mount()

    nydus_anchor.mount_overlayfs([image.rootfs(), parent_image.rootfs()])

    gen = WorkloadGen(nydus_anchor.mount_point, nydus_anchor.overlayfs)
    gen.setup_workload_generator()
    gen.torture_read(5, 5)
    gen.finish_torture_read()

    utils.execute(["umount", loop_mnt_1.name])
    utils.execute(["umount", loop_mnt_2.name])
    nydus_anchor.umount_overlayfs()
def test_build_image_param_blobid(nydus_anchor, nydus_image: RafsImage, rafs_conf: RafsConf):
    """
    description: Test if nydus-image argument `--blob-id` works properly
    """
    # More strict id check?
    nydus_image.set_backend(Backend.BACKEND_PROXY).set_param(
        "blob-id", uuid.uuid4()
    ).create_image()
    rafs_conf.set_rafs_backend(Backend.BACKEND_PROXY)

    rafs = RafsMount(nydus_anchor, nydus_image, rafs_conf)
    rafs.mount()

    gen = WorkloadGen(nydus_anchor.mount_point, nydus_image.rootfs())
    gen.setup_workload_generator()
    gen.torture_read(5, 5)
    gen.finish_torture_read()
def test_digest_validate(nydus_anchor, rafs_conf: RafsConf, nydus_image: RafsImage, compressor):
    """Mount with digest validation enabled and torture-read with verification."""
    rafs_conf.set_rafs_backend(Backend.LOCALFS)
    rafs_conf.enable_validation()
    rafs_conf.enable_rafs_blobcache()

    nydus_image.set_backend(Backend.LOCALFS, blob_dir=()).create_image(
        compressor=compressor
    )

    rafs = RafsMount(nydus_anchor, nydus_image, rafs_conf)
    rafs.mount()

    gen = WorkloadGen(nydus_anchor.mount_point, nydus_image.rootfs())
    gen.setup_workload_generator()
    gen.torture_read(5, 5, verify=True)
    gen.finish_torture_read()
def test_deep_directory(nydus_anchor, rafs_conf: RafsConf, nydus_scratch_image: RafsImage):
    """Build an image from a very deep (depth 100) directory tree and verify it."""
    dist = Distributor(nydus_anchor.scratch_dir, 100, 1)
    dist.generate_tree()

    nydus_scratch_image.set_backend(Backend.BACKEND_PROXY).create_image()
    rafs_conf.set_rafs_backend(Backend.BACKEND_PROXY)
    rafs_conf.dump_rafs_conf()

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.mount()

    gen = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())
    gen.setup_workload_generator()
    gen.torture_read(8, 5)
    gen.finish_torture_read()

    assert gen.verify_entire_fs()
def test_blobcache(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    nydus_image: RafsImage,
    nydus_scratch_parent_image: RafsImage,
    thread_cnt,
    io_duration,
):
    """Layered image with blobcache + fs prefetch; verify prefetch actually ran."""
    dist_parent = Distributor(nydus_scratch_parent_image.rootfs(), 6, 4)
    dist_parent.generate_tree()
    dist_parent.put_multiple_files(20, Size(4, Unit.KB))

    hint_files_parent = [os.path.join("/", p) for p in dist_parent.files[-20:]]
    hint_files_parent = "\n".join(hint_files_parent[-1:])

    nydus_scratch_parent_image.set_backend(Backend.OSS).create_image()
    # shutil.rmtree(nydus_scratch_parent_image.rootfs())
    nydus_image.set_backend(Backend.OSS).create_image(
        readahead_policy="fs",
        parent_image=nydus_scratch_parent_image,
        readahead_files=hint_files_parent.encode(),
    )

    nydus_anchor.mount_overlayfs(
        [nydus_image.rootfs(), nydus_scratch_parent_image.rootfs()]
    )

    rafs_conf.enable_rafs_blobcache().set_rafs_backend(Backend.OSS)
    rafs_conf.enable_fs_prefetch()
    rafs_conf.dump_rafs_conf()

    rafs = RafsMount(nydus_anchor, nydus_image, rafs_conf)
    rafs.thread_num(4).mount()

    nc = NydusAPIClient(rafs.get_apisock())
    # BUG FIX: metrics used to be sampled *before* the sleep, so the assertion
    # checked a stale snapshot. Wait for prefetch to start, then sample.
    time.sleep(1)
    m = nc.get_blobcache_metrics()
    assert m["prefetch_data_amount"] != 0

    wg = WorkloadGen(nydus_anchor.mount_point, nydus_anchor.overlayfs)
    wg.setup_workload_generator()
    wg.torture_read(thread_cnt, io_duration)
    wg.finish_torture_read()
def test_global_metrics(nydus_anchor, nydus_image: RafsImage, rafs_conf: RafsConf):
    """File-level iostats: global and per-file counters must grow with reads."""
    rafs_id = "/"

    rafs_conf.enable_files_iostats().set_rafs_backend(Backend.OSS)
    nydus_image.set_backend(Backend.OSS).create_image()

    rafs = RafsMount(nydus_anchor, nydus_image, rafs_conf)
    rafs.mount()

    nc = NydusAPIClient(rafs.get_apisock())
    gm = nc.get_global_metrics()
    assert gm["files_account_enabled"] == True
    assert gm["measure_latency"] == True

    file_counters = nc.get_files_metrics(rafs_id)
    assert len(file_counters)
    logging.info("There are %d file counters created.", len(file_counters))

    wg = WorkloadGen(nydus_anchor.mount_point, nydus_image.rootfs())
    wg.setup_workload_generator()
    wg.io_read(4)

    file_counters = nc.get_files_metrics(rafs_id)
    assert file_counters is not None and len(file_counters)
    logging.info(
        "There are %d file counters created after some read.", len(file_counters)
    )

    if len(file_counters):
        ino = random.choice(list(file_counters))
        logging.info("ino: %s, stats: %s", ino, file_counters[ino])

    # A second read round must advance the global counters.
    gm_old = nc.get_global_metrics()
    wg.io_read(4)
    gm_new = nc.get_global_metrics()

    assert gm_new["data_read"] > gm_old["data_read"]
    assert (
        gm_new["fop_hits"][nydusd_client.Fop.Read.get_value()]
        > gm_old["fop_hits"][nydusd_client.Fop.Read.get_value()]
    )

    rafs.umount()
def test_various_file_types(nydus_anchor: NydusAnchor, rafs_conf: RafsConf, nydus_scratch_image: RafsImage):
    """
    description: Put various types of files into rootfs.
      - Regular, dir, char, block, fifo, sock, symlink
    """
    with utils.pushd(nydus_scratch_image.rootfs()):
        fd = os.open("regular", os.O_CREAT | os.O_RDWR)
        os.close(fd)

        os.mkfifo("fifo")
        # BUG FIX: the original used `2 ^ 64` which is XOR (== 66), not a
        # power — device numbers now span the full 64-bit dev_t range.
        os.mknod("blk", 0o600 | stat.S_IFBLK, device=random.randint(0, 2**64 - 1))
        os.mknod("char", 0o600 | stat.S_IFCHR, device=random.randint(0, 2**64 - 1))
        os.mknod("sock", 0o600 | stat.S_IFSOCK, device=random.randint(0, 2**64 - 1))
        os.symlink("regular", "symlink")

    nydus_scratch_image.set_backend(Backend.BACKEND_PROXY).create_image()
    rafs_conf.set_rafs_backend(Backend.BACKEND_PROXY)

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.mount()

    # All special files must be visible through the rafs mount.
    with utils.pushd(nydus_anchor.mount_point):
        assert os.path.exists("fifo")
        assert os.path.exists("blk")
        assert os.path.exists("char")
        assert os.path.exists("sock")
        assert os.path.exists("symlink")

    wg = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())
    wg.setup_workload_generator()
    assert wg.verify_entire_fs()

    wg.torture_read(2, 4)
    wg.finish_torture_read()
def test_detect_io_hang(nydus_anchor, nydus_image: RafsImage, rafs_conf: RafsConf):
    """Poll in-flight operation metrics while reads are in progress."""
    rafs_conf.enable_files_iostats().set_rafs_backend(Backend.OSS)
    rafs_conf.dump_rafs_conf()
    nydus_image.set_backend(Backend.OSS).create_image()

    rafs = RafsMount(nydus_anchor, nydus_image, rafs_conf)
    rafs.thread_num(5).mount()

    gen = WorkloadGen(nydus_anchor.mount_point, nydus_image.rootfs())
    gen.setup_workload_generator()
    gen.torture_read(4, 8)

    nc = NydusAPIClient(rafs.get_apisock())
    # Sample inflight ops repeatedly while the torture read runs.
    for _ in range(0, 30):
        ops = nc.get_inflight_metrics()
        time.sleep(0.1)
        print(ops)

    gen.finish_torture_read()