def test_access_pattern(nydus_anchor, nydus_image, rafs_conf: RafsConf):
    rafs_id = "/"
    rafs_conf.enable_access_pattern().set_rafs_backend(Backend.OSS)
    rafs_conf.dump_rafs_conf()

    nydus_image.set_backend(Backend.OSS).create_image()

    rafs = RafsMount(nydus_anchor, nydus_image, rafs_conf)
    rafs.mount()

    nc = NydusAPIClient(rafs.get_apisock())
    wg = WorkloadGen(nydus_anchor.mount_point, nydus_image.rootfs())
    wg.setup_workload_generator()
    wg.torture_read(4, 8)

    duration = 4
    while duration:
        time.sleep(1)
        duration -= 1

    global_metrics = nc.get_global_metrics()
    assert global_metrics["access_pattern_enabled"] == True

    patterns = nc.get_access_patterns(rafs_id)
    assert len(patterns) != 0

    patterns = nc.get_access_patterns()
    assert len(patterns) != 0

    nc.get_access_patterns("poison")
    wg.finish_torture_read()

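# A minimal polling helper (a sketch, not part of the original suite): instead of the
# fixed 4-second sleep in test_access_pattern above, the access-pattern metrics could be
# polled until they show up. Only `time` and the NydusAPIClient method already used above
# (`get_access_patterns`) are assumed; the helper name and timeout values are hypothetical.
def wait_for_access_patterns(nc, rafs_id="/", timeout=10, interval=0.5):
    """Poll nydusd until at least one access-pattern record is reported."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        patterns = nc.get_access_patterns(rafs_id)
        if len(patterns) != 0:
            return patterns
        time.sleep(interval)
    raise TimeoutError("no access pattern recorded within %s seconds" % timeout)
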
def test_build_image(nydus_anchor, nydus_scratch_image: RafsImage, rafs_conf: RafsConf):
    """
    title: Build nydus image
    description: Build nydus image from rootfs, generating a proper bootstrap and blob.
    pass_criteria:
      - Image can be successfully built and mounted.
      - Rafs can be mounted and later unmounted, with a small amount of read IO and
        attr operations performed in between.
      - Try letting the image builder upload the blob itself.
    """
    dist = Distributor(nydus_scratch_image.rootfs(), 80, 1)
    dist.generate_tree()
    dist.put_directories(100)
    dist.put_hardlinks(90)
    dist.put_symlinks(200)
    dist.put_multiple_files(random.randint(20, 28), Size(10, Unit.MB))
    dist.put_multiple_chinese_files(random.randint(20, 28), Size(20, Unit.KB))

    Whiteout().whiteout_one_file(nydus_scratch_image.rootfs(), "i/am/troublemaker/foo")

    nydus_scratch_image.set_backend(Backend.BACKEND_PROXY).create_image()

    rafs_conf.set_rafs_backend(backend_type=Backend.BACKEND_PROXY)
    rafs_conf.dump_rafs_conf()

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    wg = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())

    rafs.mount()
    assert wg.verify_entire_fs()
    rafs.umount()

def test_snapshotter(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    image_url,
    nydus_snapshotter,
    local_registry,
):
    snapshotter = Snapshotter(nydus_anchor)
    containerd = Containerd(nydus_anchor, snapshotter).gen_config()
    snapshotter.set_root(containerd.root)

    nydus_anchor.put_dustbin(snapshotter)
    nydus_anchor.put_dustbin(containerd)

    converter = Nydusify(nydus_anchor)
    converter.docker_v2().convert(image_url)

    rafs_conf.set_rafs_backend(Backend.REGISTRY, repo=converter.original_repo)
    rafs_conf.enable_xattr()
    rafs_conf.dump_rafs_conf()

    snapshotter.run(rafs_conf.path())
    time.sleep(1)
    containerd.run()

    cri = Cri(containerd.address, containerd.address)
    container_name = str(uuid.uuid4())
    cri.run_container(converter.converted_image, container_name)
    id, status = cri.check_container_status(container_name, timeout=30)
    assert id is not None
    assert status
    cri.stop_rm_container(id)
    cri.remove_image(converter.converted_image)
    containerd.remove_image_sync(converter.converted_image)

def test_meta(nydus_anchor: NydusAnchor, rafs_conf: RafsConf, nydus_scratch_image: RafsImage):
    anchor = nydus_anchor
    rafs_conf.set_rafs_backend(Backend.BACKEND_PROXY).enable_rafs_blobcache()
    rafs_conf.dump_rafs_conf()

    dist = Distributor(nydus_scratch_image.rootfs(), 8, 5)
    dist.generate_tree()

    xattr_verifier = verifier.XattrVerifier(anchor.mount_point, dist)
    xattr_verifier.scratch(nydus_scratch_image.rootfs())

    symlink_verifier = verifier.SymlinkVerifier(anchor.mount_point, dist)
    symlink_verifier.scratch()

    # Do some meta operations on the scratch dir before creating the rafs image file.
    # Use the scratch dir as the image source dir, since we just prepared test meta in it.
    nydus_scratch_image.set_backend(Backend.BACKEND_PROXY).create_image()

    rafs = RafsMount(anchor, nydus_scratch_image, rafs_conf)
    rafs.thread_num(4).mount()
    assert rafs.is_mounted()

    xattr_verifier.verify(anchor.mount_point)
    symlink_verifier.verify(anchor.mount_point, nydus_scratch_image.rootfs())

    workload_gen = WorkloadGen(anchor.mount_point, nydus_scratch_image.rootfs())
    workload_gen.setup_workload_generator()
    workload_gen.torture_read(10, 3)
    workload_gen.finish_torture_read()

    assert not workload_gen.io_error
    assert anchor.check_nydusd_health()

def test_api_mount_with_prefetch(nydus_anchor, nydus_image: RafsImage, rafs_conf: RafsConf):
    nydus_image.set_backend(Backend.OSS).create_image()
    hint_files = ["/"]

    rafs = RafsMount(nydus_anchor, None, None, with_defaults=False)

    # Prefetch requires blobcache to be enabled.
    rafs_conf.enable_rafs_blobcache()
    rafs_conf.set_rafs_backend(Backend.OSS)
    rafs_conf.enable_fs_prefetch(threads_count=4)
    rafs_conf.dump_rafs_conf()

    rafs.set_mountpoint(nydus_anchor.mount_point).apisock("api_sock").mount(
        dump_config=False,
    )

    nc = NydusAPIClient(rafs.get_apisock())
    nc.pseudo_fs_mount(
        nydus_image.bootstrap_path,
        "/pseudo_fs_1",
        rafs_conf.path(),
        hint_files,
        "rafs",
    )

    # Only one rafs mountpoint exists, so it does not matter whether the rafs id is set.
    m = nc.get_blobcache_metrics()
    # TODO: this won't pass
    # assert m["prefetch_data_amount"] != 0

    wg = WorkloadGen(os.path.join(nydus_anchor.mount_point, "pseudo_fs_1"), nydus_image.rootfs())
    wg.setup_workload_generator()
    wg.torture_read(4, 8)
    wg.finish_torture_read()

    m = nc.get_blobcache_metrics("/pseudo_fs_1")

def test_specified_prefetch(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    nydus_scratch_image: RafsImage,
    backend,
):
    """
    description:
        Nydusd can be given a list of files and directories when it is started.
        It then prefetches those files from the backend according to the list.
    """
    rafs_conf.set_rafs_backend(backend)
    rafs_conf.enable_fs_prefetch(prefetch_all=True)
    rafs_conf.enable_rafs_blobcache()
    rafs_conf.dump_rafs_conf()

    dist = Distributor(nydus_scratch_image.rootfs(), 8, 2)
    dist.generate_tree()
    dirs = dist.put_directories(20)
    dist.put_multiple_files(100, Size(64, Unit.KB))
    dist.put_symlinks(30)
    dist.put_hardlinks(20)
    dist.put_multiple_files(40, Size(64, Unit.KB))
    dist.put_single_file(Size(3, Unit.MB), name="test")

    nydus_scratch_image.set_backend(backend).create_image()

    prefetching_files = dirs
    prefetching_files += dist.files[:-10]
    prefetching_files += dist.dirs[:-5]
    prefetching_files += dist.symlinks[:-10]
    # Fuzz: paths that do not exist in the image.
    prefetching_files.append("/a/b/c/d")
    prefetching_files.append(os.path.join("/", "f/g/h/"))

    specified_dirs = " ".join([os.path.join("/", d) for d in prefetching_files])

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.prefetch_files(specified_dirs).mount()
    wg = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())
    nc = NydusAPIClient(rafs.get_apisock())

    wg.setup_workload_generator()
    blobcache_metrics = nc.get_blobcache_metrics()
    wg.torture_read(5, 10)

    while blobcache_metrics["prefetch_workers"] != 0:
        time.sleep(0.5)
        blobcache_metrics = nc.get_blobcache_metrics()

    begin = nc.get_backend_metrics()["read_amount_total"]
    time.sleep(1)
    end = nc.get_backend_metrics()["read_amount_total"]
    assert end == begin

    wg.finish_torture_read()

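# A sketch of how the prefetch hint list passed to `rafs.prefetch_files()` above could be
# factored out. The helper name and its `extra` parameter are hypothetical; it relies only
# on Distributor attributes already used in this file (dist.dirs, dist.files, dist.symlinks)
# and mirrors the space-joined absolute-path format used by test_specified_prefetch.
def build_prefetch_list(dist, extra=("/a/b/c/d",)):
    """Return a space-separated list of absolute paths for nydusd prefetching."""
    candidates = list(dist.dirs) + list(dist.files) + list(dist.symlinks)
    candidates.extend(extra)  # the test above shows non-existent paths are tolerated
    return " ".join(os.path.join("/", p) for p in candidates)
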
def test_prefetch_with_cache(
    nydus_anchor,
    nydus_scratch_image: RafsImage,
    rafs_conf: RafsConf,
    thread_cnt,
    compressor,
    is_cache_compressed,
):
    """
    title: Prefetch from various backends
    description:
      - Enable the rafs backend blob cache, as it is disabled by default.
    pass_criteria:
      - Rafs can be mounted.
      - Rafs can be unmounted.
    """
    rafs_conf.enable_rafs_blobcache(is_compressed=is_cache_compressed)
    rafs_conf.set_rafs_backend(Backend.BACKEND_PROXY, prefix="object_prefix/")
    rafs_conf.enable_fs_prefetch(threads_count=4, bandwidth_rate=Size(40, Unit.MB).B)
    rafs_conf.dump_rafs_conf()

    dist = Distributor(nydus_scratch_image.rootfs(), 4, 4)
    dist.generate_tree()
    dist.put_directories(20)
    dist.put_multiple_files(40, Size(3, Unit.MB))
    dist.put_hardlinks(6)
    dist.put_multiple_chinese_files(random.randint(20, 28), Size(20, Unit.KB))

    nydus_scratch_image.set_backend(Backend.BACKEND_PROXY, prefix="object_prefix/").create_image(
        compressor=compressor,
        readahead_policy="fs",
        readahead_files="/".encode(),
    )

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.thread_num(4).mount()

    nc = NydusAPIClient(rafs.get_apisock())
    workload_gen = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())
    m = nc.get_blobcache_metrics()
    time.sleep(0.3)
    assert m["prefetch_data_amount"] != 0

    workload_gen.setup_workload_generator()
    workload_gen.torture_read(thread_cnt, 10)

    assert NydusAnchor.check_nydusd_health()

    workload_gen.finish_torture_read()
    assert not workload_gen.io_error

    # In this way, we can check whether nydusd has crashed.
    assert rafs.is_mounted()
    rafs.umount()

def test_blobcache(
    nydus_anchor: NydusAnchor,
    nydus_image: RafsImage,
    rafs_conf: RafsConf,
    compressor,
    backend,
):
    """
    Allocate a file within the local test working directory and loop-mount it to get a
    small filesystem that is easy to fill up, then point the blob cache location at that
    mountpoint.
    """
    blobdir = "/blobdir"
    blob_backend = "blob_backend"
    fd = os.open(blob_backend, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
    os.posix_fallocate(fd, 0, 1024 * 1024 * 4)
    os.close(fd)

    utils.execute(["mkfs.ext4", "-F", blob_backend])
    utils.execute(["mount", blob_backend, blobdir])

    rafs_conf.enable_rafs_blobcache()
    rafs_conf.set_rafs_backend(backend)
    rafs_conf.dump_rafs_conf()

    cache_file = os.listdir(blobdir)
    assert len(cache_file) == 1

    rafs = RafsMount(nydus_anchor, nydus_image, rafs_conf)
    rafs.mount()
    assert rafs.is_mounted()

    workload_gen = WorkloadGen(nydus_anchor.mount_point, nydus_anchor.source_dir)
    workload_gen.setup_workload_generator()
    workload_gen.torture_read(4, 15)

    nydus_anchor.start_stats_checker()
    workload_gen.finish_torture_read()
    nydus_anchor.stop_stats_checker()

    cache_file = os.listdir(blobdir)
    assert len(cache_file) >= 2

    if workload_gen.io_error:
        warnings.warn(UserWarning("Rafs will return EIO if the blobcache file is full"))

    rafs.umount()
    ret, _ = utils.execute(["umount", blobdir])
    assert ret
    os.unlink(blob_backend)

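# A sketch of the "tiny loop-mounted ext4" trick from test_blobcache, factored into a
# context manager so cleanup always runs. The helper name, its parameters, and the
# contextlib import are assumptions; it uses only `utils.execute` and os calls that
# already appear in this file.
from contextlib import contextmanager


@contextmanager
def tiny_ext4(backing_file, mountpoint, size_bytes=4 * 1024 * 1024):
    """Create a small ext4 image, mount it, and always unmount/remove it afterwards."""
    fd = os.open(backing_file, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
    os.posix_fallocate(fd, 0, size_bytes)
    os.close(fd)
    utils.execute(["mkfs.ext4", "-F", backing_file])
    utils.execute(["mount", backing_file, mountpoint])
    try:
        yield mountpoint
    finally:
        utils.execute(["umount", mountpoint])
        os.unlink(backing_file)
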
def test_basic_conversion(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    source,
    fs_version,
    local_registry,
    nydusify_converter,
):
    """
    No need to locate where the bootstrap is, as we can pull it directly from the registry.
    """
    converter = Nydusify(nydus_anchor)
    time.sleep(1)
    converter.docker_v2().enable_multiplatfrom(False).convert(source, fs_version=fs_version)
    assert converter.locate_bootstrap() is not None

    pulled_bootstrap = converter.pull_bootstrap(
        tempfile.TemporaryDirectory(dir=nydus_anchor.workspace, suffix="bootstrap").name,
        "pulled_bootstrap",
    )

    # Skopeo does not support the media type "application/vnd.oci.image.layer.nydus.blob.v1",
    # so the build cache can't be downloaded like an OCI image.
    layers, base = converter.extract_source_layers_names_and_download()
    nydus_anchor.mount_overlayfs(layers, base)

    converted_layers = converter.extract_converted_layers_names()
    converted_layers.sort()

    rafs_conf.set_rafs_backend(Backend.REGISTRY, repo=posixpath.basename(source).split(":")[0])
    rafs_conf.enable_fs_prefetch()
    rafs_conf.enable_rafs_blobcache()
    rafs_conf.dump_rafs_conf()

    rafs = RafsMount(nydus_anchor, None, rafs_conf)

    # Use `nydus-image inspect` to compare the blob table in the bootstrap and manifest.
    workload_gen = WorkloadGen(nydus_anchor.mount_point, nydus_anchor.overlayfs)

    # The bootstrap was pulled from the registry above, so mount it directly.
    rafs.thread_num(6).bootstrap(pulled_bootstrap).prefetch_files("/").mount()

    assert workload_gen.verify_entire_fs()
    workload_gen.setup_workload_generator()
    workload_gen.torture_read(4, 6, verify=True)
    workload_gen.finish_torture_read()

def test_blob_prefetch(nydus_anchor: NydusAnchor, nydus_scratch_image: RafsImage, readahead_policy):
    """
    description:
        Rafs supports two types of prefetching:
        1. Prefetch files from the fs layer, which means each file is prefetched one by one.
        2. Prefetch directly from the backend/blob layer, which means a range is fetched
           from the blob.
    """
    # Delete any access log, since if one is present, bootstrap blob prefetch won't work.
    utils.execute("rm -rf *.access", shell=True)

    dist = Distributor(nydus_scratch_image.rootfs(), 8, 2)
    dist.generate_tree()
    dist.put_directories(20)
    dist.put_multiple_files(100, Size(64, Unit.KB))
    dist.put_symlinks(30)
    dist.put_hardlinks(20)
    dist.put_multiple_files(40, Size(64, Unit.KB))

    utils.clean_pagecache()

    hint_files = dist.files[-40:]
    hint_files.extend(dist.symlinks[-20:])
    hint_files = [os.path.join("/", p) for p in hint_files]
    hint_files = "\n".join(hint_files)

    nydus_scratch_image.set_backend(Backend.LOCALFS).create_image(
        readahead_policy=readahead_policy,
        readahead_files=hint_files.encode(),
    )

    rafs_conf = RafsConf(nydus_anchor, nydus_scratch_image)
    rafs_conf.set_rafs_backend(Backend.LOCALFS, image=nydus_scratch_image)
    rafs_conf.enable_records_readahead(interval=1)
    rafs_conf.dump_rafs_conf()

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    with utils.timer("Mount elapse"):
        rafs.thread_num(7).mount()
    assert rafs.is_mounted()

    wg = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())

    # TODO: Run several parallel read workers against the mount_point
    wg.setup_workload_generator()
    wg.torture_read(5, 5)
    wg.finish_torture_read()

    utils.clean_pagecache()

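# A sketch of the readahead hint encoding used above: the image builder receives
# newline-separated absolute paths as bytes via `readahead_files`. The helper name is
# hypothetical; the format is taken directly from the tests in this file.
def encode_readahead_hints(paths):
    """Turn a list of in-image paths into the bytes passed to create_image()."""
    return "\n".join(os.path.join("/", p) for p in paths).encode()
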
def test_blobcache_recovery(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    nydus_scratch_image: RafsImage,
):
    rafs_conf.set_rafs_backend(Backend.BACKEND_PROXY)
    rafs_conf.enable_fs_prefetch()
    rafs_conf.enable_rafs_blobcache()
    rafs_conf.dump_rafs_conf()

    dist = Distributor(nydus_scratch_image.rootfs(), 8, 2)
    dist.generate_tree()
    dirs = dist.put_directories(20)
    dist.put_multiple_files(100, Size(64, Unit.KB))
    dist.put_symlinks(30)
    dist.put_hardlinks(20)
    dist.put_multiple_files(40, Size(64, Unit.KB))
    dist.put_single_file(Size(3, Unit.MB), name="test")

    nydus_scratch_image.set_backend(Backend.BACKEND_PROXY).create_image()

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.prefetch_files("/").mount()
    wg = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())

    wg.setup_workload_generator()
    wg.torture_read(4, 4)

    # Hopefully, prefetch can finish within 5 seconds.
    time.sleep(5)

    wg.finish_torture_read()
    rafs.umount()

    rafs2 = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs2.mount()

    wg.torture_read(4, 4)
    time.sleep(0.5)

    nc = NydusAPIClient(rafs2.get_apisock())

    begin = nc.get_backend_metrics()["read_amount_total"]
    time.sleep(1)
    end = nc.get_backend_metrics()["read_amount_total"]
    assert end == begin == 0

    wg.finish_torture_read()

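# A sketch of the "no backend traffic" check performed at the end of
# test_blobcache_recovery and test_specified_prefetch: sample the backend read counter
# twice and require it not to grow. The helper name and `settle` parameter are
# hypothetical; `get_backend_metrics()["read_amount_total"]` is used exactly as above.
def assert_backend_reads_quiesced(nc, settle=1.0):
    """Assert nydusd is serving reads entirely from the local blobcache."""
    begin = nc.get_backend_metrics()["read_amount_total"]
    time.sleep(settle)
    end = nc.get_backend_metrics()["read_amount_total"]
    assert end == begin, "backend still being read: %d -> %d" % (begin, end)
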
def test_prefetch_without_cache(
    nydus_anchor: NydusAnchor, nydus_scratch_image: RafsImage, rafs_conf: RafsConf
):
    """Files prefetch test

    1. relative hinted prefetch files
    2. absolute hinted prefetch files
    3. source rootfs root dir.
    """
    rafs_conf.enable_fs_prefetch().set_rafs_backend(Backend.BACKEND_PROXY)
    rafs_conf.dump_rafs_conf()

    dist = Distributor(nydus_scratch_image.rootfs(), 4, 4)
    dist.generate_tree()
    dist.put_directories(20)
    dist.put_multiple_files(40, Size(8, Unit.KB))
    dist.put_hardlinks(6)
    dist.put_multiple_chinese_files(random.randint(20, 28), Size(20, Unit.KB))

    hint_files = ["/"]
    hint_files.extend(dist.files)
    hint_files.extend(dist.dirs)
    hint_files.extend(dist.symlinks)
    hint_files.extend(dist.hardlinks)
    hint_files = [os.path.join("/", p) for p in hint_files]
    hint_files = "\n".join(hint_files)

    nydus_scratch_image.set_backend(Backend.BACKEND_PROXY).create_image(
        readahead_policy="fs", readahead_files=hint_files.encode()
    )

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.mount()
    assert rafs.is_mounted()

    workload_gen = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())

    # TODO: Run several parallel read workers against the mount_point
    workload_gen.setup_workload_generator()
    workload_gen.torture_read(8, 5)
    workload_gen.finish_torture_read()

    assert NydusAnchor.check_nydusd_health()
    assert not workload_gen.io_error

    assert rafs.is_mounted()
    rafs.umount()

def test_syscalls(
    nydus_anchor: NydusAnchor,
    nydus_scratch_image: RafsImage,
    rafs_conf: RafsConf,
):
    syscall_helper = "framework/test_syscalls"
    ret, _ = utils.execute(
        ["gcc", "framework/test_syscalls.c", "-o", syscall_helper],
        shell=False,
        print_output=True,
    )
    assert ret

    dist = Distributor(nydus_scratch_image.rootfs(), 2, 2)
    dist.generate_tree()
    dist.put_single_file(Size(8, Unit.KB), pos=nydus_scratch_image.rootfs(), name="xattr_no_kv")
    dist.put_single_file_with_xattr(
        Size(8, Unit.KB),
        ("trusted.nydus.key", ""),
        pos=nydus_scratch_image.rootfs(),
        name="xattr_empty_value",
    )
    dist.put_single_file_with_xattr(
        Size(8, Unit.KB),
        ("trusted.nydus.key", "1234567890"),
        pos=nydus_scratch_image.rootfs(),
        name="xattr_insufficient_buffer",
    )

    nydus_scratch_image.set_backend(Backend.BACKEND_PROXY).create_image()

    rafs_conf.enable_xattr().set_rafs_backend(Backend.BACKEND_PROXY)
    rafs_conf.dump_rafs_conf()

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.mount()

    for no in [58]:
        ret, _ = utils.execute(
            [syscall_helper, nydus_anchor.mount_point, str(no)],
            shell=False,
            print_output=True,
        )
        assert ret

def test_snapshotter_converted_images(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    converted_images,
    nydus_snapshotter,
):
    # snapshotter = Snapshotter(nydus_anchor).enable_nydus_overlayfs()
    snapshotter = Snapshotter(nydus_anchor)
    containerd = Containerd(nydus_anchor, snapshotter).gen_config()
    snapshotter.set_root(containerd.root)

    nydus_anchor.put_dustbin(snapshotter)
    nydus_anchor.put_dustbin(containerd)

    # It is safe to skip providing a repo in the rafs configuration file here.
    rafs_conf.set_rafs_backend(Backend.REGISTRY, scheme="https")
    rafs_conf.enable_xattr()
    rafs_conf.dump_rafs_conf()

    snapshotter.run(rafs_conf.path())
    time.sleep(1)
    containerd.run()

    cri = Cri(containerd.address, containerd.address)
    id_set = []
    for ref in converted_images:
        container_name = str(uuid.uuid4())
        cri.run_container(ref, container_name)
        id, status = cri.check_container_status(container_name, timeout=30)
        assert id is not None
        assert status
        id_set.append((id, ref))
        time.sleep(2)

    for id, ref in id_set:
        cri.stop_rm_container(id)
        cri.remove_image(ref)
        containerd.remove_image_sync(ref)

    # TODO: Rafs won't be unmounted and nydusd will still be alive even after the image
    # is removed locally, so kill all nydusd here to make the following verification pass.
    # Is this a bug?

    # Ensure nydusd has been stopped by this point.
    time.sleep(3)

def test_snapshotter_restart(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    converted_images,
    nydus_snapshotter,
):
    snapshotter = Snapshotter(nydus_anchor)
    containerd = Containerd(nydus_anchor, snapshotter).gen_config()
    snapshotter.set_root(containerd.root)

    nydus_anchor.put_dustbin(containerd)

    # It is safe to skip providing a repo in the rafs configuration file here.
    rafs_conf.set_rafs_backend(Backend.REGISTRY, scheme="https")
    rafs_conf.enable_xattr().enable_fs_prefetch().enable_rafs_blobcache(
        work_dir=snapshotter.cache_dir()
    )
    rafs_conf.dump_rafs_conf()

    snapshotter.run(rafs_conf.path())
    time.sleep(1)
    containerd.run()

    cri = Cri(containerd.address, containerd.address)
    id_set = []
    for ref in converted_images:
        container_name = str(uuid.uuid4())
        cri.run_container(ref, container_name)
        id, status = cri.check_container_status(container_name, timeout=30)
        assert id is not None
        assert status
        id_set.append((id, ref))
        time.sleep(2)

    snapshotter.shutdown()

    snapshotter = Snapshotter(nydus_anchor)
    snapshotter.set_root(containerd.root)
    nydus_anchor.put_dustbin(snapshotter)
    snapshotter.run(rafs_conf.path())

    for id, ref in id_set:
        cri.stop_rm_container(id)
        cri.remove_image(ref)
        containerd.remove_image_sync(ref)

def test_deep_directory(nydus_anchor, rafs_conf: RafsConf, nydus_scratch_image: RafsImage):
    dist = Distributor(nydus_anchor.scratch_dir, 100, 1)
    dist.generate_tree()

    nydus_scratch_image.set_backend(Backend.BACKEND_PROXY).create_image()
    rafs_conf.set_rafs_backend(Backend.BACKEND_PROXY)
    rafs_conf.dump_rafs_conf()

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.mount()

    wg = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())
    wg.setup_workload_generator()
    wg.torture_read(8, 5)
    wg.finish_torture_read()

    assert wg.verify_entire_fs()

def test_shared_blobcache(nydus_anchor, nydus_image, rafs_conf: RafsConf):
    """
    description: Start more than one nydusd and let them share the same blobcache.
    """
    nydus_image.set_backend(Backend.LOCALFS, blob_dir=()).create_image()
    rafs_conf.enable_rafs_blobcache().set_rafs_backend(Backend.LOCALFS)
    rafs_conf.dump_rafs_conf()

    def make_rafs(mountpoint):
        rafs = (
            RafsMount(nydus_anchor, nydus_image, rafs_conf)
            .apisock(tempfile.NamedTemporaryFile().name)
            .prefetch_files("/")
            .set_mountpoint(mountpoint)
        )
        return rafs

    cases = []
    count = 10
    for num in range(0, count):
        mountpoint = tempfile.TemporaryDirectory(
            dir=nydus_anchor.workspace, suffix="root_" + str(num)
        )
        rafs = make_rafs(mountpoint.name)
        rafs.mount(dump_config=False)
        workload_gen = WorkloadGen(mountpoint.name, nydus_image.rootfs())
        workload_gen.setup_workload_generator()
        cases.append((rafs, workload_gen, mountpoint))

    for case in cases:
        utils.clean_pagecache()
        case[1].torture_read(4, 5)

    for case in cases:
        case[1].finish_torture_read()

    # Ensure that the blob & bitmap files are present in the blobcache dir.
    assert len(os.listdir(nydus_anchor.blobcache_dir)) == 2

    for case in cases:
        case[0].umount()

def test_upload_oss(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    source,
    local_registry,
    nydusify_converter,
):
    """
    docker python client manual: https://docker-py.readthedocs.io/en/stable/

    Use the bootstrap pulled from the registry, instead of the one newly generated by
    nydus-image, to check whether the bootstrap was pushed successfully.
    """
    converter = Nydusify(nydus_anchor)
    time.sleep(1)
    oss_prefix = "nydus_v2/"
    converter.docker_v2().backend_type(
        "oss", oss_object_prefix=oss_prefix, filed=True
    ).build_cache_ref("localhost:5000/build_cache:000").force_push().convert(source)

    nydus_image_output = converter.nydus_image_output()
    blobs_to_remove = nydus_image_output["blobs"]

    # Convert a second time, just to observe whether the conversion is faster.
    converter.docker_v2().backend_type(
        "oss", oss_object_prefix=oss_prefix
    ).build_cache_ref("localhost:5000/build_cache:000").force_push().convert(source)

    rafs_conf.set_rafs_backend(Backend.OSS, prefix=oss_prefix)
    rafs_conf.enable_fs_prefetch()
    rafs_conf.enable_rafs_blobcache()
    rafs_conf.dump_rafs_conf()

    bootstrap = converter.locate_bootstrap()

    # `check` deletes all files
    checker = Nydusify(nydus_anchor)
    checker.backend_type("oss", oss_object_prefix=oss_prefix).with_new_work_dir(
        nydus_anchor.nydusify_work_dir + "-check"
    ).check(source)

    converted_layers = converter.extract_converted_layers_names()

    # With the oss backend (Ant usage), `layers` only has one member.
    records = converter.get_build_cache_records("localhost:5000/build_cache:000")
    assert len(records) != 0
    cached_layers = [c["digest"] for c in records]
    assert sorted(cached_layers) == sorted(converted_layers)

    pulled_bootstrap = converter.pull_bootstrap(
        tempfile.TemporaryDirectory(dir=nydus_anchor.workspace, suffix="bootstrap").name,
        "pulled_bootstrap",
    )

    layers, base = converter.extract_source_layers_names_and_download()
    nydus_anchor.mount_overlayfs(layers, base)

    rafs = RafsMount(nydus_anchor, None, rafs_conf)
    workload_gen = WorkloadGen(nydus_anchor.mount_point, nydus_anchor.overlayfs)

    rafs.thread_num(6).bootstrap(pulled_bootstrap).prefetch_files("/").mount()

    assert workload_gen.verify_entire_fs()
    workload_gen.setup_workload_generator()
    workload_gen.torture_read(8, 12, verify=True)
    workload_gen.finish_torture_read()

    oss = OssHelper(
        nydus_anchor.ossutil_bin,
        endpoint=nydus_anchor.oss_endpoint,
        bucket=nydus_anchor.oss_bucket,
        ak_id=nydus_anchor.oss_ak_id,
        ak_secret=nydus_anchor.oss_ak_secret,
        prefix=None,
    )

    # Nydusify will skip uploading a blob object if it already exists.
    for b in blobs_to_remove:
        oss.rm(b)

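# A tiny sketch of the digest comparison done above. Note that list.sort() returns None,
# so comparing the return values of .sort() (as the original assertion did) is always
# vacuously true; sorted() must be used instead. The helper name is hypothetical.
def same_digest_set(cached_layers, converted_layers):
    """Order-insensitive comparison of two lists of layer digests."""
    return sorted(cached_layers) == sorted(converted_layers)
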
def test_pseudo_fs(nydus_anchor, nydus_image, rafs_conf: RafsConf):
    nydus_image.set_backend(Backend.BACKEND_PROXY).create_image()
    rafs_conf.set_rafs_backend(Backend.BACKEND_PROXY)
    rafs = RafsMount(nydus_anchor, None, rafs_conf)
    rafs.mount()
    time.sleep(1)
    nc = NydusAPIClient(rafs.get_apisock())

    try:
        shutil.rmtree("pseudo_fs_scratch")
    except FileNotFoundError:
        pass

    scratch_rootfs = shutil.copytree(nydus_image.rootfs(), "pseudo_fs_scratch", symlinks=True)
    dist = Distributor(scratch_rootfs, 5, 5)
    dist.generate_tree()
    dist.put_multiple_files(20, Size(8, Unit.KB))

    ###
    suffix = "1"
    image = RafsImage(
        nydus_anchor,
        scratch_rootfs,
        "bs" + suffix,
        "blob" + suffix,
    )
    conf = RafsConf(nydus_anchor)
    conf.enable_fs_prefetch()
    conf.enable_validation()
    conf.set_rafs_backend(Backend.BACKEND_PROXY)
    conf.dump_rafs_conf()
    image.set_backend(Backend.BACKEND_PROXY).create_image()
    nc.pseudo_fs_mount(image.bootstrap_path, f"/pseudo{suffix}", conf.path(), None)

    ###
    suffix = "2"
    image = RafsImage(
        nydus_anchor,
        scratch_rootfs,
        "bs" + suffix,
        "blob" + suffix,
    )
    conf = RafsConf(nydus_anchor)
    conf.enable_rafs_blobcache()
    conf.enable_validation()
    conf.enable_records_readahead()
    conf.set_rafs_backend(Backend.BACKEND_PROXY)
    conf.dump_rafs_conf()
    dist.put_multiple_files(20, Size(8, Unit.KB))
    image.set_backend(Backend.BACKEND_PROXY).create_image()
    nc.pseudo_fs_mount(image.bootstrap_path, f"/pseudo{suffix}", conf.path(), None)

    ###
    suffix = "3"
    image = RafsImage(
        nydus_anchor,
        scratch_rootfs,
        "bs" + suffix,
        "blob" + suffix,
    )
    conf = RafsConf(nydus_anchor)
    conf.enable_rafs_blobcache()
    conf.enable_records_readahead()
    conf.set_rafs_backend(Backend.BACKEND_PROXY)
    conf.dump_rafs_conf()
    dist.put_multiple_files(20, Size(8, Unit.KB))
    image.set_backend(Backend.BACKEND_PROXY).create_image()
    nc.pseudo_fs_mount(image.bootstrap_path, f"/pseudo{suffix}", conf.path(), None)

    wg1 = WorkloadGen(os.path.join(nydus_anchor.mount_point, "pseudo1"), scratch_rootfs)
    wg2 = WorkloadGen(os.path.join(nydus_anchor.mount_point, "pseudo2"), scratch_rootfs)
    wg3 = WorkloadGen(os.path.join(nydus_anchor.mount_point, "pseudo3"), scratch_rootfs)

    time.sleep(2)
    wg1.setup_workload_generator()
    wg2.setup_workload_generator()
    wg3.setup_workload_generator()

    wg1.torture_read(4, 8)
    wg2.torture_read(4, 8)
    wg3.torture_read(4, 8)

    wg1.finish_torture_read()
    wg2.finish_torture_read()
    wg3.finish_torture_read()

    # TODO: Verification is temporarily disabled since it is hard to select the `verify dir`.
    # assert wg1.verify_entire_fs()
    # assert wg2.verify_entire_fs()
    # assert wg3.verify_entire_fs()

    nc.umount_rafs("/pseudo1")
    nc.umount_rafs("/pseudo2")
    nc.umount_rafs("/pseudo3")

def test_backend_swap(nydus_anchor, nydus_scratch_image: RafsImage, rafs_conf: RafsConf):
    dist = Distributor(nydus_scratch_image.rootfs(), 5, 4)
    dist.generate_tree()
    dist.put_multiple_files(100, Size(2, Unit.MB))

    nydus_scratch_image.set_backend(Backend.OSS).create_image(
        readahead_policy="fs", readahead_files="/".encode()
    )

    rafs_conf.set_rafs_backend(Backend.OSS).enable_rafs_blobcache().enable_fs_prefetch(
        threads_count=7, bandwidth_rate=Size(2, Unit.MB).B
    )
    rafs_conf.dump_rafs_conf()

    rafs = RafsMount(nydus_anchor, None, rafs_conf, with_defaults=False)
    rafs.thread_num(4).set_mountpoint(nydus_anchor.mount_point).apisock("api_sock").mount()

    nc = NydusAPIClient(rafs.get_apisock())
    nc.pseudo_fs_mount(nydus_scratch_image.bootstrap_path, "/", rafs_conf.path(), None)
    nc.umount_rafs("/")
    assert len(os.listdir(nydus_anchor.mount_point)) == 0

    mp = "/pseudo1"
    nc.pseudo_fs_mount(nydus_scratch_image.bootstrap_path, mp, rafs_conf.path(), None)

    rafs_conf_2nd = RafsConf(nydus_anchor, nydus_scratch_image)
    rafs_conf_2nd.set_rafs_backend(
        Backend.LOCALFS, image=nydus_scratch_image
    ).enable_rafs_blobcache().enable_fs_prefetch(
        threads_count=3, bandwidth_rate=Size(1, Unit.MB).B
    )
    rafs_conf_2nd.dump_rafs_conf()

    new_image = (
        RafsImage(nydus_anchor, nydus_scratch_image.rootfs())
        .set_backend(Backend.LOCALFS)
        .create_image(readahead_policy="fs", readahead_files="/".encode())
    )

    # TODO: Once upon a time, more than one fd was open. Check why this happened.
    wg = WorkloadGen(
        os.path.join(nydus_anchor.mount_point, mp.strip("/")),
        nydus_scratch_image.rootfs(),
    )
    wg.setup_workload_generator()
    wg.torture_read(8, 8)

    for i in range(1, 50):
        logging.debug("swap for the %dth time", i)
        nc.swap_backend(mp, new_image.bootstrap_name, rafs_conf_2nd.path())
        # assert nc.get_blobcache_metrics(mp)["prefetch_workers"] == 3
        time.sleep(0.2)
        nc.swap_backend(mp, nydus_scratch_image.bootstrap_name, rafs_conf.path())

    utils.clean_pagecache()

    wg.finish_torture_read()
    assert not wg.io_error

    nc.umount_rafs(mp)
    utils.clean_pagecache()