def test_snapshotter(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    image_url,
    nydus_snapshotter,
    local_registry,
):
    snapshotter = Snapshotter(nydus_anchor)
    containerd = Containerd(nydus_anchor, snapshotter).gen_config()
    snapshotter.set_root(containerd.root)
    nydus_anchor.put_dustbin(snapshotter)
    nydus_anchor.put_dustbin(containerd)

    converter = Nydusify(nydus_anchor)
    converter.docker_v2().convert(image_url)

    rafs_conf.set_rafs_backend(Backend.REGISTRY, repo=converter.original_repo)
    rafs_conf.enable_xattr()
    rafs_conf.dump_rafs_conf()

    snapshotter.run(rafs_conf.path())
    time.sleep(1)
    containerd.run()
    cri = Cri(containerd.address, containerd.address)

    container_name = str(uuid.uuid4())
    cri.run_container(converter.converted_image, container_name)
    id, status = cri.check_container_status(container_name, timeout=30)
    assert id is not None
    assert status

    cri.stop_rm_container(id)
    cri.remove_image(converter.converted_image)
    containerd.remove_image_sync(converter.converted_image)
def test_blobcache(
    nydus_anchor: NydusAnchor,
    nydus_image: RafsImage,
    rafs_conf: RafsConf,
    compressor,
    backend,
):
    """
    Allocate a file within the local test working directory and loop-mount it,
    so we get a small file system that is easy to fill up.
    Point the blob cache location at the test blobdir above.
    """
    blobdir = "/blobdir"
    blob_backend = "blob_backend"
    fd = os.open(blob_backend, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
    os.posix_fallocate(fd, 0, 1024 * 1024 * 4)
    os.close(fd)

    utils.execute(["mkfs.ext4", "-F", blob_backend])
    utils.execute(["mount", blob_backend, blobdir])

    # Put the blob cache onto the small file system created above so it can fill up.
    rafs_conf.enable_rafs_blobcache(work_dir=blobdir)
    rafs_conf.set_rafs_backend(backend)
    rafs_conf.dump_rafs_conf()

    # Only `lost+found` exists on the freshly created ext4 file system.
    cache_file = os.listdir(blobdir)
    assert len(cache_file) == 1

    rafs = RafsMount(nydus_anchor, nydus_image, rafs_conf)
    rafs.mount()
    assert rafs.is_mounted()

    workload_gen = WorkloadGen(nydus_anchor.mount_point, nydus_anchor.source_dir)
    workload_gen.setup_workload_generator()
    workload_gen.torture_read(4, 15)

    nydus_anchor.start_stats_checker()
    workload_gen.finish_torture_read()
    nydus_anchor.stop_stats_checker()

    # Blob cache files should now sit alongside `lost+found`.
    cache_file = os.listdir(blobdir)
    assert len(cache_file) >= 2

    if workload_gen.io_error:
        warnings.warn(UserWarning("Rafs will return EIO if the blobcache file system is full"))

    rafs.umount()
    ret, _ = utils.execute(["umount", blobdir])
    assert ret
    os.unlink(blob_backend)
def test_basic_conversion(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    source,
    fs_version,
    local_registry,
    nydusify_converter,
):
    """
    No need to locate where the bootstrap is, as we can pull it directly from the registry.
    """
    converter = Nydusify(nydus_anchor)
    time.sleep(1)

    converter.docker_v2().enable_multiplatfrom(False).convert(
        source, fs_version=fs_version
    )
    assert converter.locate_bootstrap() is not None

    pulled_bootstrap = converter.pull_bootstrap(
        tempfile.TemporaryDirectory(
            dir=nydus_anchor.workspace, suffix="bootstrap"
        ).name,
        "pulled_bootstrap",
    )

    # Skopeo does not support the media type "application/vnd.oci.image.layer.nydus.blob.v1",
    # so the build cache can't be downloaded like an OCI image.
    layers, base = converter.extract_source_layers_names_and_download()
    nydus_anchor.mount_overlayfs(layers, base)

    converted_layers = converter.extract_converted_layers_names()
    converted_layers.sort()

    rafs_conf.set_rafs_backend(
        Backend.REGISTRY, repo=posixpath.basename(source).split(":")[0]
    )
    rafs_conf.enable_fs_prefetch()
    rafs_conf.enable_rafs_blobcache()
    rafs_conf.dump_rafs_conf()

    rafs = RafsMount(nydus_anchor, None, rafs_conf)

    # TODO: Use `nydus-image inspect` to compare the blob table in bootstrap and manifest.
    workload_gen = WorkloadGen(nydus_anchor.mount_point, nydus_anchor.overlayfs)

    # Mount the bootstrap pulled from the registry directly.
    rafs.thread_num(6).bootstrap(pulled_bootstrap).prefetch_files("/").mount()

    assert workload_gen.verify_entire_fs()
    workload_gen.setup_workload_generator()
    workload_gen.torture_read(4, 6, verify=True)
    workload_gen.finish_torture_read()
def test_prefetch_with_cache(
    nydus_anchor,
    nydus_scratch_image: RafsImage,
    rafs_conf: RafsConf,
    thread_cnt,
    compressor,
    is_cache_compressed,
):
    """
    title: Prefetch from various backends
    description:
      - Enable the rafs backend blob cache, as it is disabled by default.
    pass_criteria:
      - Rafs can be mounted.
      - Rafs can be unmounted.
    """
    rafs_conf.enable_rafs_blobcache(is_compressed=is_cache_compressed)
    rafs_conf.set_rafs_backend(Backend.BACKEND_PROXY, prefix="object_prefix/")
    rafs_conf.enable_fs_prefetch(
        threads_count=4, bandwidth_rate=Size(40, Unit.MB).B
    )
    rafs_conf.dump_rafs_conf()

    dist = Distributor(nydus_scratch_image.rootfs(), 4, 4)
    dist.generate_tree()
    dist.put_directories(20)
    dist.put_multiple_files(40, Size(3, Unit.MB))
    dist.put_hardlinks(6)
    dist.put_multiple_chinese_files(random.randint(20, 28), Size(20, Unit.KB))

    nydus_scratch_image.set_backend(
        Backend.BACKEND_PROXY, prefix="object_prefix/"
    ).create_image(
        compressor=compressor,
        readahead_policy="fs",
        readahead_files="/".encode(),
    )

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.thread_num(4).mount()

    nc = NydusAPIClient(rafs.get_apisock())
    workload_gen = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())

    m = nc.get_blobcache_metrics()
    time.sleep(0.3)
    assert m["prefetch_data_amount"] != 0

    workload_gen.setup_workload_generator()
    workload_gen.torture_read(thread_cnt, 10)

    assert NydusAnchor.check_nydusd_health()

    workload_gen.finish_torture_read()
    assert not workload_gen.io_error

    # Checking the mount status also tells us whether nydusd crashed.
    assert rafs.is_mounted()
    rafs.umount()
def test_snapshotter_converted_images(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    converted_images,
    nydus_snapshotter,
):
    # snapshotter = Snapshotter(nydus_anchor).enable_nydus_overlayfs()
    snapshotter = Snapshotter(nydus_anchor)
    containerd = Containerd(nydus_anchor, snapshotter).gen_config()
    snapshotter.set_root(containerd.root)
    nydus_anchor.put_dustbin(snapshotter)
    nydus_anchor.put_dustbin(containerd)

    # We can safely skip the step of configuring the repo in the rafs configuration file.
    rafs_conf.set_rafs_backend(Backend.REGISTRY, scheme="https")
    rafs_conf.enable_xattr()
    rafs_conf.dump_rafs_conf()

    snapshotter.run(rafs_conf.path())
    time.sleep(1)
    containerd.run()
    cri = Cri(containerd.address, containerd.address)

    id_set = []
    for ref in converted_images:
        container_name = str(uuid.uuid4())
        cri.run_container(ref, container_name)
        id, status = cri.check_container_status(container_name, timeout=30)
        assert id is not None
        assert status
        id_set.append((id, ref))

    time.sleep(2)

    for id, ref in id_set:
        cri.stop_rm_container(id)
        cri.remove_image(ref)
        containerd.remove_image_sync(ref)

    # TODO: Rafs won't be unmounted and nydusd stays alive even after the image is
    # removed locally, so kill all nydusd here to make the following verification
    # pass. Is this a bug?
    # Ensure nydusd has been stopped at this point.
    time.sleep(3)
def test_snapshotter_restart(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    converted_images,
    nydus_snapshotter,
):
    snapshotter = Snapshotter(nydus_anchor)
    containerd = Containerd(nydus_anchor, snapshotter).gen_config()
    snapshotter.set_root(containerd.root)
    nydus_anchor.put_dustbin(containerd)

    # We can safely skip the step of configuring the repo in the rafs configuration file.
    rafs_conf.set_rafs_backend(Backend.REGISTRY, scheme="https")
    rafs_conf.enable_xattr().enable_fs_prefetch().enable_rafs_blobcache(
        work_dir=snapshotter.cache_dir()
    )
    rafs_conf.dump_rafs_conf()

    snapshotter.run(rafs_conf.path())
    time.sleep(1)
    containerd.run()
    cri = Cri(containerd.address, containerd.address)

    id_set = []
    for ref in converted_images:
        container_name = str(uuid.uuid4())
        cri.run_container(ref, container_name)
        id, status = cri.check_container_status(container_name, timeout=30)
        assert id is not None
        assert status
        id_set.append((id, ref))

    time.sleep(2)

    # Restart the snapshotter and make sure the running containers can still be torn down.
    snapshotter.shutdown()
    snapshotter = Snapshotter(nydus_anchor)
    snapshotter.set_root(containerd.root)
    nydus_anchor.put_dustbin(snapshotter)
    snapshotter.run(rafs_conf.path())

    for id, ref in id_set:
        cri.stop_rm_container(id)
        cri.remove_image(ref)
        containerd.remove_image_sync(ref)
def test_prefetch_without_cache(
    nydus_anchor: NydusAnchor,
    nydus_scratch_image: RafsImage,
    rafs_conf: RafsConf,
):
    """Prefetch hinted files test.

    1. Relative hinted prefetch files.
    2. Absolute hinted prefetch files.
    3. Source rootfs root dir.
    """
    rafs_conf.enable_fs_prefetch().set_rafs_backend(Backend.BACKEND_PROXY)
    rafs_conf.dump_rafs_conf()

    dist = Distributor(nydus_scratch_image.rootfs(), 4, 4)
    dist.generate_tree()
    dist.put_directories(20)
    dist.put_multiple_files(40, Size(8, Unit.KB))
    dist.put_hardlinks(6)
    dist.put_multiple_chinese_files(random.randint(20, 28), Size(20, Unit.KB))

    # Hint the rootfs root plus every generated entry; one absolute path per line.
    hint_files = ["/"]
    hint_files.extend(dist.files)
    hint_files.extend(dist.dirs)
    hint_files.extend(dist.symlinks)
    hint_files.extend(dist.hardlinks)
    hint_files = [os.path.join("/", p) for p in hint_files]
    hint_files = "\n".join(hint_files)

    nydus_scratch_image.set_backend(Backend.BACKEND_PROXY).create_image(
        readahead_policy="fs", readahead_files=hint_files.encode()
    )

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.mount()
    assert rafs.is_mounted()

    workload_gen = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())

    # TODO: Run several parallel read workers against the mount_point
    workload_gen.setup_workload_generator()
    workload_gen.torture_read(8, 5)
    workload_gen.finish_torture_read()

    assert NydusAnchor.check_nydusd_health()
    assert not workload_gen.io_error
    assert rafs.is_mounted()
    rafs.umount()
def test_hardlink(
    nydus_anchor: NydusAnchor, nydus_scratch_image, rafs_conf: RafsConf
):
    dist = Distributor(nydus_scratch_image.rootfs(), 8, 6)
    dist.generate_tree()

    hardlink_verifier = verifier.HardlinkVerifier(nydus_scratch_image.rootfs(), dist)
    hardlink_verifier.scratch()

    nydus_scratch_image.set_backend(Backend.BACKEND_PROXY).create_image()
    rafs_conf.set_rafs_backend(Backend.BACKEND_PROXY)

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.mount()

    wg = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())
    hardlink_verifier.verify(nydus_anchor.mount_point, nydus_scratch_image.rootfs())

    wg.setup_workload_generator()
    wg.io_read(3)

    nydus_anchor.check_nydusd_health()
    assert not wg.io_error
def __init__(
    self,
    anchor: NydusAnchor,
    image: RafsImage,
    conf: RafsConf,
    with_defaults=True,
    bin=None,
):
    """Start up nydusd and mount rafs.

    :image: If `image` is `None`, no `--metadata` is passed to nydusd.
            In that case, rafs has to be mounted through the API.
    """
    anchor.nydusd = self  # So pytest has a chance to clean up dirties.
    self.anchor = anchor
    self.rafs_image = image  # Associate with a rafs image to boot up.
    self.conf: RafsConf = conf
    self.mount_point = anchor.mount_point  # The point to which nydus will mount.
    self.param_value_prefix = " "
    self.mount_params = RafsMountParam(anchor.nydusd_bin if bin is None else bin)

    if with_defaults:
        self._set_default_mount_param()
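# A minimal usage sketch of the two boot modes described in the docstring above.
# This is not part of the framework; names such as `nydus_anchor`, `nydus_image`,
# `rafs_conf` and `pulled_bootstrap` are assumed to come from the test fixtures
# used elsewhere in this suite.
#
#   # Bootstrap known up front: nydusd gets `--metadata` and mounts at startup.
#   rafs = RafsMount(nydus_anchor, nydus_image, rafs_conf)
#   rafs.mount()
#
#   # No image associated: start nydusd without `--metadata`, point it at a
#   # pulled bootstrap and mount via the builder-style helpers.
#   rafs = RafsMount(nydus_anchor, None, rafs_conf)
#   rafs.thread_num(6).bootstrap(pulled_bootstrap).prefetch_files("/").mount()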
)
parser.add_argument(
    "--backend",
    type=str,
    default="",
)
parser.add_argument(
    "--anchor",
    type=str,
    default="",
)
parser.add_argument(
    "--oss-object-prefix",
    dest="oss_object_prefix",
    type=str,
    default=None,
)

args = parser.parse_args()

backend = args.backend
anchor = NydusAnchor(args.anchor)
sources = args.sources
oss_object_prefix = args.oss_object_prefix

print(sources)

for s in sources:
    converter = Nydusify(anchor)
    converter.docker_v2().backend_type(backend, oss_object_prefix).convert(s)
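# Hypothetical invocation of this conversion helper. The script name and the way
# the image sources are supplied are assumptions (their argument definitions sit
# above this fragment); only --backend, --anchor and --oss-object-prefix are
# visible here:
#
#   python convert.py --backend oss --anchor anchor_conf.json \
#       --oss-object-prefix nydus_v2/ <image-ref> [<image-ref> ...]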
import os
import sys
import shutil
import random
import logging
import yaml
from argparse import ArgumentParser

sys.path.append(os.path.realpath("framework"))

from nydus_anchor import NydusAnchor
from distributor import Distributor
import utils
from utils import Size, logging_setup

logging_setup()

ANCHOR = NydusAnchor()


def define_fs_structure(structure_dist):
    with open(structure_dist) as fd:
        fs_dist = yaml.safe_load(fd)
    return fs_dist


def put_files(dist: Distributor, f_type, count, size):
    """Example:

    depth: 4
    width: 4
    layers:
      - layer1:
def test_prefetch_with_cache(
    nydus_anchor,
    nydus_scratch_image: RafsImage,
    rafs_conf: RafsConf,
    thread_cnt,
    compressor,
    is_cache_compressed,
    converter,
    items,
):
    """
    title: Prefetch from various backends
    description:
      - Enable the rafs backend blob cache, as it is disabled by default.
    pass_criteria:
      - Rafs can be mounted.
      - Rafs can be unmounted.
    """
    dist = Distributor(nydus_scratch_image.rootfs(), 4, 4)
    dist.generate_tree()
    dist.put_directories(20)
    dist.put_multiple_files(40, Size(3, Unit.MB))
    dist.put_multiple_files(10, Size(5, Unit.MB))
    dist.put_hardlinks(6)
    dist.put_multiple_chinese_files(random.randint(20, 28), Size(20, Unit.KB))

    nydus_scratch_image.set_backend(Backend.LOCALFS).create_image(
        image_bin=converter,
        compressor=compressor,
        readahead_policy="fs",
        readahead_files="/".encode(),
    )

    rafs_conf.enable_rafs_blobcache(
        is_compressed=is_cache_compressed
    ).enable_fs_prefetch()
    rafs_conf.set_rafs_backend(Backend.LOCALFS, image=nydus_scratch_image)

    # `items` names extra RafsConf switches; look each one up on the class and
    # apply it to this configuration instance.
    for i in items:
        item = RafsConf.__dict__[i]
        item(rafs_conf)

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.thread_num(6).mount()

    nc = NydusAPIClient(rafs.get_apisock())
    workload_gen = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())

    m = nc.get_blobcache_metrics()
    time.sleep(0.3)
    assert m["prefetch_data_amount"] != 0

    workload_gen.verify_entire_fs()
    workload_gen.setup_workload_generator()
    workload_gen.torture_read(thread_cnt, 6)

    assert NydusAnchor.check_nydusd_health()

    workload_gen.finish_torture_read()
    assert not workload_gen.io_error
def test_cross_platform_multiplatform(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    source,
    arch,
    enable_multiplatform,
    local_registry,
    nydusify_converter,
):
    """
    - Copy the entire repo from the source registry to the target registry.
    - One image corresponds to a manifest list while the other corresponds to a single manifest.
    - Use the cloned source rather than the one from the original registry.
    - Push the converted images back to the original source.
    - Also test multiplatform here.
    - ? It seems that even with the --multiplatform flag, nydusify still pushes a single manifest.
    - The converted manifest index has one more image than the origin.
    """
    # Copy the entire repo for multiplatform.
    skopeo = utils.Skopeo()
    source_name_tagged = posixpath.basename(source)
    target_image = f"localhost:5000/{source_name_tagged}"
    cloned_source = f"localhost:5000/{source_name_tagged}"
    skopeo.copy_all_to_registry(source, target_image)

    origin_manifest_index = skopeo.manifest_list(cloned_source)
    utils.Skopeo.pretty_print(origin_manifest_index)

    converter = Nydusify(nydus_anchor)
    converter.docker_v2().build_cache_ref(
        "localhost:5000/build_cache:000"
    ).platform(f"linux/{arch}").enable_multiplatfrom(enable_multiplatform).convert(
        cloned_source, target_ref=target_image
    )

    # TODO: configure the registry backend from `local_registry` rather than the anchor.
    rafs_conf.set_rafs_backend(
        Backend.REGISTRY, repo=posixpath.basename(source).split(":")[0]
    )
    rafs_conf.enable_fs_prefetch()
    rafs_conf.enable_rafs_blobcache()

    pulled_bootstrap = converter.pull_bootstrap(
        tempfile.TemporaryDirectory(
            dir=nydus_anchor.workspace, suffix="bootstrap"
        ).name,
        "pulled_bootstrap",
        arch,
    )

    # Skopeo does not support the media type "application/vnd.oci.image.layer.nydus.blob.v1",
    # so the build cache can't be downloaded like an OCI image.
    layers, base = converter.extract_source_layers_names_and_download(arch=arch)
    nydus_anchor.mount_overlayfs(layers, base)

    converted_layers = converter.extract_converted_layers_names(arch=arch)
    converted_layers.sort()

    converted_manifest_index = skopeo.manifest_list(cloned_source)
    utils.Skopeo.pretty_print(converted_manifest_index)

    assert (
        len(converted_manifest_index["manifests"])
        - len(origin_manifest_index["manifests"])
        == 1
    )

    # `inspect` succeeds if an image for the requested arch can be found.
    skopeo.inspect(target_image, image_arch=arch)
    converter.find_nydus_image(target_image, arch)

    target_image_config = converter.pull_config(target_image, arch=arch)
    assert target_image_config["architecture"] == arch

    records = converter.get_build_cache_records("localhost:5000/build_cache:000")
    assert len(records) != 0

    cached_layers = [c["digest"] for c in records]
    cached_layers.sort()
    # A direct `assert cached_layers == converted_layers` used to fail here
    # (it compared against None), so check membership instead.
    for r in converted_layers:
        assert r in cached_layers

    # TODO: Use `nydus-image inspect` to compare the blob table in bootstrap and manifest.
    workload_gen = WorkloadGen(nydus_anchor.mount_point, nydus_anchor.overlayfs)

    # Mount the bootstrap pulled from the registry directly.
    rafs = RafsMount(nydus_anchor, None, rafs_conf)
    rafs.thread_num(6).bootstrap(pulled_bootstrap).prefetch_files("/").mount()

    assert workload_gen.verify_entire_fs()
    workload_gen.setup_workload_generator()
    workload_gen.torture_read(8, 12, verify=True)
    workload_gen.finish_torture_read()
def test_upload_oss(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    source,
    local_registry,
    nydusify_converter,
):
    """
    Docker python client manual: https://docker-py.readthedocs.io/en/stable/

    Use the bootstrap pulled from the registry instead of the one newly generated
    by nydus-image, to check whether the bootstrap was pushed successfully.
    """
    converter = Nydusify(nydus_anchor)
    time.sleep(1)
    oss_prefix = "nydus_v2/"

    converter.docker_v2().backend_type(
        "oss", oss_object_prefix=oss_prefix, filed=True
    ).build_cache_ref("localhost:5000/build_cache:000").force_push().convert(source)

    nydus_image_output = converter.nydus_image_output()
    blobs_to_remove = nydus_image_output["blobs"]

    # Convert a second time, just to observe whether conversion is faster with the build cache.
    converter.docker_v2().backend_type(
        "oss", oss_object_prefix=oss_prefix
    ).build_cache_ref("localhost:5000/build_cache:000").force_push().convert(source)

    rafs_conf.set_rafs_backend(Backend.OSS, prefix=oss_prefix)
    rafs_conf.enable_fs_prefetch()
    rafs_conf.enable_rafs_blobcache()
    rafs_conf.dump_rafs_conf()

    bootstrap = converter.locate_bootstrap()

    # `check` deletes all files.
    checker = Nydusify(nydus_anchor)
    checker.backend_type("oss", oss_object_prefix=oss_prefix).with_new_work_dir(
        nydus_anchor.nydusify_work_dir + "-check"
    ).check(source)

    converted_layers = converter.extract_converted_layers_names()

    # With the oss backend (Ant usage), `layers` only has one member.
    records = converter.get_build_cache_records("localhost:5000/build_cache:000")
    assert len(records) != 0

    cached_layers = [c["digest"] for c in records]
    cached_layers.sort()
    converted_layers.sort()
    assert cached_layers == converted_layers

    pulled_bootstrap = converter.pull_bootstrap(
        tempfile.TemporaryDirectory(
            dir=nydus_anchor.workspace, suffix="bootstrap"
        ).name,
        "pulled_bootstrap",
    )

    layers, base = converter.extract_source_layers_names_and_download()
    nydus_anchor.mount_overlayfs(layers, base)

    rafs = RafsMount(nydus_anchor, None, rafs_conf)
    workload_gen = WorkloadGen(nydus_anchor.mount_point, nydus_anchor.overlayfs)
    rafs.thread_num(6).bootstrap(pulled_bootstrap).prefetch_files("/").mount()

    assert workload_gen.verify_entire_fs()
    workload_gen.setup_workload_generator()
    workload_gen.torture_read(8, 12, verify=True)
    workload_gen.finish_torture_read()

    oss = OssHelper(
        nydus_anchor.ossutil_bin,
        endpoint=nydus_anchor.oss_endpoint,
        bucket=nydus_anchor.oss_bucket,
        ak_id=nydus_anchor.oss_ak_id,
        ak_secret=nydus_anchor.oss_ak_secret,
        prefix=None,
    )

    # Nydusify will skip uploading a blob object if it already exists.
    for b in blobs_to_remove:
        oss.rm(b)