Example #1
def test_api_mount_with_prefetch(nydus_anchor, nydus_image: RafsImage,
                                 rafs_conf: RafsConf):
    nydus_image.set_backend(Backend.OSS).create_image()

    hint_files = ["/"]
    rafs = RafsMount(nydus_anchor, None, None, with_defaults=False)

    # Prefetch requires blobcache to be enabled.
    rafs_conf.enable_rafs_blobcache()
    rafs_conf.set_rafs_backend(Backend.OSS)
    rafs_conf.enable_fs_prefetch(threads_count=4)
    rafs_conf.dump_rafs_conf()
    rafs.set_mountpoint(nydus_anchor.mount_point).apisock("api_sock").mount(
        dump_config=False)

    nc = NydusAPIClient(rafs.get_apisock())
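    # Drive the daemon through its API socket: attach the RAFS instance under
    # /pseudo_fs_1, with `hint_files` ("/" here) as the prefetch hint list.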
    nc.pseudo_fs_mount(
        nydus_image.bootstrap_path,
        "/pseudo_fs_1",
        rafs_conf.path(),
        hint_files,
        "rafs",
    )

    # Only one rafs mountpoint exists, so it does not matter whether a rafs id is passed.
    m = nc.get_blobcache_metrics()
    # TODO: this assertion does not pass yet (see the polling sketch after this example).
    # assert m["prefetch_data_amount"] != 0

    wg = WorkloadGen(os.path.join(nydus_anchor.mount_point, "pseudo_fs_1"),
                     nydus_image.rootfs())
    wg.setup_workload_generator()
    wg.torture_read(4, 8)
    wg.finish_torture_read()
    m = nc.get_blobcache_metrics("/pseudo_fs_1")
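
The blobcache assertion in this example is still a TODO. As a minimal sketch (not part of the test suite), assuming the same harness client `nc` and the `prefetch_workers` / `prefetch_data_amount` metric keys used in the examples below, the metric could be polled once the prefetch workers are expected to drain:

import time

# Hypothetical helper: poll the global blobcache metrics until every prefetch
# worker has exited, then return the final snapshot for assertions.
def wait_for_prefetch(nc, timeout=30.0, interval=0.5):
    deadline = time.time() + timeout
    while time.time() < deadline:
        m = nc.get_blobcache_metrics()
        if m["prefetch_workers"] == 0:
            return m
        time.sleep(interval)
    raise TimeoutError(f"prefetch workers did not drain within {timeout}s")

A test could then run `m = wait_for_prefetch(nc)` before asserting on `m["prefetch_data_amount"]`.
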
Example #2
def test_specified_prefetch(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    nydus_scratch_image: RafsImage,
    backend,
):
    """
    description:
        Nydusd can be started with a list of files and directories to prefetch.
        It then prefetches those entries from the backend according to the list.
    """

    rafs_conf.set_rafs_backend(backend)
    rafs_conf.enable_fs_prefetch(prefetch_all=True)
    rafs_conf.enable_rafs_blobcache()
    rafs_conf.dump_rafs_conf()

    dist = Distributor(nydus_scratch_image.rootfs(), 8, 2)
    dist.generate_tree()
    dirs = dist.put_directories(20)
    dist.put_multiple_files(100, Size(64, Unit.KB))
    dist.put_symlinks(30)
    dist.put_hardlinks(20)
    dist.put_multiple_files(40, Size(64, Unit.KB))
    dist.put_single_file(Size(3, Unit.MB), name="test")

    nydus_scratch_image.set_backend(backend).create_image()

    prefetching_files = dirs
    prefetching_files += dist.files[:-10]
    prefetching_files += dist.dirs[:-5]
    prefetching_files += dist.symlinks[:-10]
    # Fuzz: also include paths that may not exist in the image.
    prefetching_files.append("/a/b/c/d")
    prefetching_files.append(os.path.join("/", "f/g/h/"))

    specified_dirs = " ".join(
        [os.path.join("/", d) for d in prefetching_files])

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.prefetch_files(specified_dirs).mount()
    wg = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())

    nc = NydusAPIClient(rafs.get_apisock())
    wg.setup_workload_generator()
    blobcache_metrics = nc.get_blobcache_metrics()
    wg.torture_read(5, 10)

    while blobcache_metrics["prefetch_workers"] != 0:
        time.sleep(0.5)
        blobcache_metrics = nc.get_blobcache_metrics()
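
    # Once the prefetch workers have drained, the torture readers should be
    # served entirely from the blobcache, so the backend read counter must stay
    # flat over the sampling window below (see the helper sketched after this
    # example).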

    begin = nc.get_backend_metrics()["read_amount_total"]
    time.sleep(1)
    end = nc.get_backend_metrics()["read_amount_total"]

    assert end == begin
    wg.finish_torture_read()
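
This example and Example #5 below use the same two-sample check on the backend read counter. A minimal sketch of that pattern as a standalone helper (the helper name is hypothetical; it only assumes the `nc` client and the `read_amount_total` key shown above):

import time

# Sample the backend read counter twice, `window` seconds apart, and report
# whether any backend traffic happened in between.
def backend_reads_are_flat(nc, window=1.0):
    begin = nc.get_backend_metrics()["read_amount_total"]
    time.sleep(window)
    end = nc.get_backend_metrics()["read_amount_total"]
    return end == begin

With it, the final check above would read `assert backend_reads_are_flat(nc)`.
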
Example #3
def test_prefetch_with_cache(
    nydus_anchor,
    nydus_scratch_image: RafsImage,
    rafs_conf: RafsConf,
    thread_cnt,
    compressor,
    is_cache_compressed,
):
    """
    title: Prefetch from various backends
    description:
      - Enable the rafs backend blob cache, as it is disabled by default.
    pass_criteria:
      - Rafs can be mounted.
      - Rafs can be unmounted.
    """
    rafs_conf.enable_rafs_blobcache(is_compressed=is_cache_compressed)
    rafs_conf.set_rafs_backend(Backend.BACKEND_PROXY, prefix="object_prefix/")
    rafs_conf.enable_fs_prefetch(threads_count=4,
                                 bandwidth_rate=Size(40, Unit.MB).B)
    rafs_conf.dump_rafs_conf()

    dist = Distributor(nydus_scratch_image.rootfs(), 4, 4)
    dist.generate_tree()
    dist.put_directories(20)
    dist.put_multiple_files(40, Size(3, Unit.MB))
    dist.put_hardlinks(6)
    dist.put_multiple_chinese_files(random.randint(20, 28), Size(20, Unit.KB))

    nydus_scratch_image.set_backend(Backend.BACKEND_PROXY,
                                    prefix="object_prefix/").create_image(
                                        compressor=compressor,
                                        readahead_policy="fs",
                                        readahead_files="/".encode(),
                                    )
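    # readahead_policy="fs" bakes the prefetch hint list into the bootstrap at
    # image-creation time; readahead_files="/" hints the whole filesystem.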

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.thread_num(4).mount()

    nc = NydusAPIClient(rafs.get_apisock())
    workload_gen = WorkloadGen(nydus_anchor.mount_point,
                               nydus_scratch_image.rootfs())
    time.sleep(0.3)
    m = nc.get_blobcache_metrics()
    assert m["prefetch_data_amount"] != 0

    workload_gen.setup_workload_generator()
    workload_gen.torture_read(thread_cnt, 10)

    assert NydusAnchor.check_nydusd_health()

    workload_gen.finish_torture_read()
    assert not workload_gen.io_error

    # This also verifies that nydusd has not crashed.
    assert rafs.is_mounted()
    rafs.umount()
Example #4
def test_basic_conversion(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    source,
    fs_version,
    local_registry,
    nydusify_converter,
):
    """
    There is no need to locate the bootstrap on disk, as it can be pulled directly from the registry.
    """
    converter = Nydusify(nydus_anchor)

    time.sleep(1)

    converter.docker_v2().enable_multiplatfrom(False).convert(
        source, fs_version=fs_version)
    assert converter.locate_bootstrap() is not None
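    # Pull the bootstrap back from the registry; mounting with it below also
    # verifies that nydusify pushed the bootstrap correctly.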
    pulled_bootstrap = converter.pull_bootstrap(
        tempfile.TemporaryDirectory(dir=nydus_anchor.workspace,
                                    suffix="bootstrap").name,
        "pulled_bootstrap",
    )

    # Skopeo does not support the media type "application/vnd.oci.image.layer.nydus.blob.v1",
    # so the build cache cannot be downloaded like an OCI image.

    layers, base = converter.extract_source_layers_names_and_download()
    nydus_anchor.mount_overlayfs(layers, base)

    converted_layers = converter.extract_converted_layers_names()
    converted_layers.sort()

    rafs_conf.set_rafs_backend(Backend.REGISTRY,
                               repo=posixpath.basename(source).split(":")[0])
    rafs_conf.enable_fs_prefetch()
    rafs_conf.enable_rafs_blobcache()
    rafs_conf.dump_rafs_conf()

    rafs = RafsMount(nydus_anchor, None, rafs_conf)

    # Use `nydus-image inspect` to compare blob table in bootstrap and manifest

    workload_gen = WorkloadGen(nydus_anchor.mount_point,
                               nydus_anchor.overlayfs)
    # No need to locate the bootstrap on disk; it was pulled directly from the registry above.
    rafs.thread_num(6).bootstrap(pulled_bootstrap).prefetch_files("/").mount()

    assert workload_gen.verify_entire_fs()
    workload_gen.setup_workload_generator()
    workload_gen.torture_read(4, 6, verify=True)
    workload_gen.finish_torture_read()
Example #5
def test_blobcache_recovery(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    nydus_scratch_image: RafsImage,
):
    rafs_conf.set_rafs_backend(Backend.BACKEND_PROXY)
    rafs_conf.enable_fs_prefetch()
    rafs_conf.enable_rafs_blobcache()
    rafs_conf.dump_rafs_conf()

    dist = Distributor(nydus_scratch_image.rootfs(), 8, 2)
    dist.generate_tree()
    dirs = dist.put_directories(20)
    dist.put_multiple_files(100, Size(64, Unit.KB))
    dist.put_symlinks(30)
    dist.put_hardlinks(20)
    dist.put_multiple_files(40, Size(64, Unit.KB))
    dist.put_single_file(Size(3, Unit.MB), name="test")

    nydus_scratch_image.set_backend(Backend.BACKEND_PROXY).create_image()

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.prefetch_files("/").mount()
    wg = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())

    wg.setup_workload_generator()
    wg.torture_read(4, 4)

    # Hopefully, prefetch can finish within 5 seconds.
    time.sleep(5)

    wg.finish_torture_read()
    rafs.umount()

    rafs2 = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs2.mount()

    wg.torture_read(4, 4)
    time.sleep(0.5)

    nc = NydusAPIClient(rafs2.get_apisock())
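
    # After remount, reads should be served from the blobcache left behind by
    # the first mount, so the backend read counter must remain at zero.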

    begin = nc.get_backend_metrics()["read_amount_total"]
    time.sleep(1)
    end = nc.get_backend_metrics()["read_amount_total"]

    assert end == begin == 0

    wg.finish_torture_read()
Example #6
def test_prefetch_without_cache(nydus_anchor: NydusAnchor,
                                nydus_scratch_image: RafsImage,
                                rafs_conf: RafsConf):
    """Files prefetch test

    1. relative hinted prefetch files
    2. absolute hinted prefetch files
    3. source rootfs root dir.
    """

    rafs_conf.enable_fs_prefetch().set_rafs_backend(Backend.BACKEND_PROXY)
    rafs_conf.dump_rafs_conf()

    dist = Distributor(nydus_scratch_image.rootfs(), 4, 4)
    dist.generate_tree()
    dist.put_directories(20)
    dist.put_multiple_files(40, Size(8, Unit.KB))
    dist.put_hardlinks(6)
    dist.put_multiple_chinese_files(random.randint(20, 28), Size(20, Unit.KB))

    hint_files = ["/"]
    hint_files.extend(dist.files)
    hint_files.extend(dist.dirs)
    hint_files.extend(dist.symlinks)
    hint_files.extend(dist.hardlinks)

    hint_files = [os.path.join("/", p) for p in hint_files]
    hint_files = "\n".join(hint_files)

    nydus_scratch_image.set_backend(Backend.BACKEND_PROXY).create_image(
        readahead_policy="fs", readahead_files=hint_files.encode())

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.mount()
    assert rafs.is_mounted()

    workload_gen = WorkloadGen(nydus_anchor.mount_point,
                               nydus_scratch_image.rootfs())

    # TODO: Run several parallel read workers against the mount_point
    workload_gen.setup_workload_generator()
    workload_gen.torture_read(8, 5)
    workload_gen.finish_torture_read()

    assert NydusAnchor.check_nydusd_health()
    assert not workload_gen.io_error

    assert rafs.is_mounted()
    rafs.umount()
Example #7
def test_pseudo_fs(nydus_anchor, nydus_image, rafs_conf: RafsConf):
    nydus_image.set_backend(Backend.BACKEND_PROXY).create_image()

    rafs_conf.set_rafs_backend(Backend.BACKEND_PROXY)

    rafs = RafsMount(nydus_anchor, None, rafs_conf)
    rafs.mount()
    time.sleep(1)
    nc = NydusAPIClient(rafs.get_apisock())

    try:
        shutil.rmtree("pseudo_fs_scratch")
    except FileNotFoundError:
        pass

    scratch_rootfs = shutil.copytree(nydus_image.rootfs(),
                                     "pseudo_fs_scratch",
                                     symlinks=True)
    dist = Distributor(scratch_rootfs, 5, 5)
    dist.generate_tree()
    dist.put_multiple_files(20, Size(8, Unit.KB))
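
    # Build and mount three images from the same (growing) scratch rootfs under
    # separate pseudo-fs paths, each with a different cache/validation
    # configuration.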

    ###
    suffix = "1"
    image = RafsImage(
        nydus_anchor,
        scratch_rootfs,
        "bs" + suffix,
        "blob" + suffix,
    )
    conf = RafsConf(nydus_anchor)
    conf.enable_fs_prefetch()
    conf.enable_validation()
    conf.set_rafs_backend(Backend.BACKEND_PROXY)
    conf.dump_rafs_conf()

    image.set_backend(Backend.BACKEND_PROXY).create_image()
    nc.pseudo_fs_mount(image.bootstrap_path, f"/pseudo{suffix}", conf.path(),
                       None)
    ###
    suffix = "2"
    image = RafsImage(
        nydus_anchor,
        scratch_rootfs,
        "bs" + suffix,
        "blob" + suffix,
    )
    conf = RafsConf(nydus_anchor)
    conf.enable_rafs_blobcache()
    conf.enable_validation()
    conf.enable_records_readahead()
    conf.set_rafs_backend(Backend.BACKEND_PROXY)
    conf.dump_rafs_conf()

    dist.put_multiple_files(20, Size(8, Unit.KB))

    image.set_backend(Backend.BACKEND_PROXY).create_image()
    nc.pseudo_fs_mount(image.bootstrap_path, f"/pseudo{suffix}", conf.path(),
                       None)
    ###
    suffix = "3"
    image = RafsImage(
        nydus_anchor,
        scratch_rootfs,
        "bs" + suffix,
        "blob" + suffix,
    )
    conf = RafsConf(nydus_anchor)
    conf.enable_rafs_blobcache()
    conf.enable_records_readahead()
    conf.set_rafs_backend(Backend.BACKEND_PROXY)
    conf.dump_rafs_conf()

    dist.put_multiple_files(20, Size(8, Unit.KB))

    image.set_backend(Backend.BACKEND_PROXY).create_image()
    nc.pseudo_fs_mount(image.bootstrap_path, f"/pseudo{suffix}", conf.path(),
                       None)

    wg1 = WorkloadGen(os.path.join(nydus_anchor.mount_point, "pseudo1"),
                      scratch_rootfs)
    wg2 = WorkloadGen(os.path.join(nydus_anchor.mount_point, "pseudo2"),
                      scratch_rootfs)
    wg3 = WorkloadGen(os.path.join(nydus_anchor.mount_point, "pseudo3"),
                      scratch_rootfs)

    time.sleep(2)
    wg1.setup_workload_generator()
    wg2.setup_workload_generator()
    wg3.setup_workload_generator()

    wg1.torture_read(4, 8)
    wg2.torture_read(4, 8)
    wg3.torture_read(4, 8)

    wg1.finish_torture_read()
    wg2.finish_torture_read()
    wg3.finish_torture_read()

    # TODO: Verification is temporarily disabled because it is hard to select the `verify dir`.
    # assert wg1.verify_entire_fs()
    # assert wg2.verify_entire_fs()
    # assert wg3.verify_entire_fs()

    nc.umount_rafs("/pseudo1")
    nc.umount_rafs("/pseudo2")
    nc.umount_rafs("/pseudo3")
Example #8
def test_cross_platform_multiplatform(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    source,
    arch,
    enable_multiplatform,
    local_registry,
    nydusify_converter,
):
    """
    - copy the entire repo from the source registry to the target registry
    - one image corresponds to a manifest list while the other to a single manifest
    - use the cloned source rather than the one from the original registry
    - push the converted images to the original source
    - also test multiplatform here
    - ? it seems that even with the --multiplatform flag, nydusify still pushes just a single manifest
    - the converted manifest index has one more image than the original
    """

    # Copy the entire repo for multiplatform
    skopeo = utils.Skopeo()
    source_name_tagged = posixpath.basename(source)
    target_image = f"localhost:5000/{source_name_tagged}"
    cloned_source = f"localhost:5000/{source_name_tagged}"
    skopeo.copy_all_to_registry(source, target_image)

    origin_manifest_index = skopeo.manifest_list(cloned_source)
    utils.Skopeo.pretty_print(origin_manifest_index)

    converter = Nydusify(nydus_anchor)

    converter.docker_v2(
    ).build_cache_ref("localhost:5000/build_cache:000").platform(
        f"linux/{arch}").enable_multiplatfrom(enable_multiplatform).convert(
            cloned_source, target_ref=target_image)

    # TODO: configure registry backend from `local_registry` rather than anchor
    rafs_conf.set_rafs_backend(Backend.REGISTRY,
                               repo=posixpath.basename(source).split(":")[0])
    rafs_conf.enable_fs_prefetch()
    rafs_conf.enable_rafs_blobcache()

    pulled_bootstrap = converter.pull_bootstrap(
        tempfile.TemporaryDirectory(dir=nydus_anchor.workspace,
                                    suffix="bootstrap").name,
        "pulled_bootstrap",
        arch,
    )

    # Skopeo does not support the media type "application/vnd.oci.image.layer.nydus.blob.v1",
    # so the build cache cannot be downloaded like an OCI image.
    layers, base = converter.extract_source_layers_names_and_download(
        arch=arch)
    nydus_anchor.mount_overlayfs(layers, base)

    converted_layers = converter.extract_converted_layers_names(arch=arch)
    converted_layers.sort()

    converted_manifest_index = skopeo.manifest_list(cloned_source)
    utils.Skopeo.pretty_print(converted_manifest_index)
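
    # Conversion should add exactly one manifest (the nydus image for this
    # arch) on top of the original manifest list.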

    assert (len(converted_manifest_index["manifests"]) -
            len(origin_manifest_index["manifests"]) == 1)

    # `inspect` succeeds if an image matching the requested arch can be found.
    skopeo.inspect(target_image, image_arch=arch)
    converter.find_nydus_image(target_image, arch)

    target_image_config = converter.pull_config(target_image, arch=arch)
    assert target_image_config["architecture"] == arch

    records = converter.get_build_cache_records(
        "localhost:5000/build_cache:000")
    assert len(records) != 0
    cached_layers = [c["digest"] for c in records]
    cached_layers.sort()
    # >       assert cached_layers == converted_layers
    # E       AssertionError: assert None == ['sha256:3f18...af3234b4c257']
    # E         +None
    # E         -['sha256:3f18b27a912188108c8590684206bd9da7d81bbfd0e8325f3ef0af3234b4c257']
    for r in converted_layers:
        assert r in cached_layers

    # Use `nydus-image inspect` to compare blob table in bootstrap and manifest
    workload_gen = WorkloadGen(nydus_anchor.mount_point,
                               nydus_anchor.overlayfs)
    # No need to locate the bootstrap on disk; it was pulled directly from the registry above.
    rafs = RafsMount(nydus_anchor, None, rafs_conf)
    rafs.thread_num(6).bootstrap(pulled_bootstrap).prefetch_files("/").mount()

    assert workload_gen.verify_entire_fs()
    workload_gen.setup_workload_generator()
    workload_gen.torture_read(8, 12, verify=True)
    workload_gen.finish_torture_read()
Example #9
def test_upload_oss(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    source,
    local_registry,
    nydusify_converter,
):
    """
    docker python client manual: https://docker-py.readthedocs.io/en/stable/
    Use the bootstrap pulled from the registry, instead of the one newly generated
    by nydus-image, to check whether the bootstrap was pushed successfully.
    """
    converter = Nydusify(nydus_anchor)

    time.sleep(1)

    oss_prefix = "nydus_v2/"

    converter.docker_v2().backend_type(
        "oss", oss_object_prefix=oss_prefix, filed=True).build_cache_ref(
            "localhost:5000/build_cache:000").force_push().convert(source)

    nydus_image_output = converter.nydus_image_output()
    blobs_to_remove = nydus_image_output["blobs"]

    # Just to observe whether the second conversion is faster.
    converter.docker_v2().backend_type(
        "oss", oss_object_prefix=oss_prefix).build_cache_ref(
            "localhost:5000/build_cache:000").force_push().convert(source)

    rafs_conf.set_rafs_backend(Backend.OSS, prefix=oss_prefix)
    rafs_conf.enable_fs_prefetch()
    rafs_conf.enable_rafs_blobcache()
    rafs_conf.dump_rafs_conf()

    bootstrap = converter.locate_bootstrap()

    # `check` deletes all files
    checker = Nydusify(nydus_anchor)
    checker.backend_type(
        "oss", oss_object_prefix=oss_prefix).with_new_work_dir(
            nydus_anchor.nydusify_work_dir + "-check").check(source)

    converted_layers = converter.extract_converted_layers_names()

    # With the OSS backend (Ant usage), `layers` has only one member.
    records = converter.get_build_cache_records(
        "localhost:5000/build_cache:000")
    assert len(records) != 0
    cached_layers = [c["digest"] for c in records]
    assert sorted(cached_layers) == sorted(converted_layers)

    pulled_bootstrap = converter.pull_bootstrap(
        tempfile.TemporaryDirectory(dir=nydus_anchor.workspace,
                                    suffix="bootstrap").name,
        "pulled_bootstrap",
    )

    layers, base = converter.extract_source_layers_names_and_download()
    nydus_anchor.mount_overlayfs(layers, base)

    rafs = RafsMount(nydus_anchor, None, rafs_conf)

    workload_gen = WorkloadGen(nydus_anchor.mount_point,
                               nydus_anchor.overlayfs)
    rafs.thread_num(6).bootstrap(pulled_bootstrap).prefetch_files("/").mount()

    assert workload_gen.verify_entire_fs()
    workload_gen.setup_workload_generator()
    workload_gen.torture_read(8, 12, verify=True)
    workload_gen.finish_torture_read()

    oss = OssHelper(
        nydus_anchor.ossutil_bin,
        endpoint=nydus_anchor.oss_endpoint,
        bucket=nydus_anchor.oss_bucket,
        ak_id=nydus_anchor.oss_ak_id,
        ak_secret=nydus_anchor.oss_ak_secret,
        prefix=None,
    )

    # Nydusify skips uploading a blob as an object if it already exists.
    for b in blobs_to_remove:
        oss.rm(b)