Example #1
def test_prefetch_with_cache(
    nydus_anchor: NydusAnchor,
    nydus_scratch_image: RafsImage,
    nydus_scratch_parent_image: RafsImage,
    rafs_conf: RafsConf,
):
    parent_rootfs = nydus_scratch_parent_image.rootfs()
    upper_rootfs = nydus_scratch_image.rootfs()

    rafs_conf.enable_validation()
    rafs_conf.set_rafs_backend(Backend.OSS)
    rafs_conf.enable_rafs_blobcache()
    rafs_conf.enable_fs_prefetch(threads_count=4, merging_size=512 * 1024)
    rafs_conf.dump_rafs_conf()

    dist_parent = Distributor(parent_rootfs, 6, 4)
    dist_parent.generate_tree()
    dist_parent.put_directories(20)
    dist_parent.put_multiple_files(100, Size(64, Unit.KB))
    dist_parent.put_symlinks(30)
    dist_parent.put_hardlinks(20)

    dist_upper = Distributor(upper_rootfs, 3, 8)
    dist_upper.generate_tree()
    dist_upper.put_multiple_files(27, Size(3, Unit.MB))
    dist_upper.put_symlinks(5)

    # hint_files_parent = dist_parent.put_multiple_files(1000, Size(8, Unit.KB))
    # hint_files_parent = [os.path.join(parent_rootfs, p) for p in hint_files_parent]
    # hint_files_parent = "\n".join(hint_files_parent)

    # The parent image hints its whole root ("/") for prefetch; the upper layer
    # below is built with an explicit per-file hint list instead.
    nydus_scratch_parent_image.set_backend(Backend.OSS).create_image(
        readahead_policy="fs", readahead_files="/".encode())

    hint_files = dist_upper.put_multiple_files(1000, Size(8, Unit.KB))
    hint_files.extend(dist_upper.put_multiple_empty_files(200))

    hint_files = [os.path.join("/", p) for p in hint_files]
    hint_files = "\n".join(hint_files)

    nydus_scratch_image.set_backend(Backend.OSS).create_image(
        parent_image=nydus_scratch_parent_image,
        readahead_policy="fs",
        readahead_files=hint_files.encode(),
    )

    nydus_anchor.mount_overlayfs(
        [nydus_scratch_image.rootfs(),
         nydus_scratch_parent_image.rootfs()])

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.thread_num(5).mount()

    workload_gen = WorkloadGen(nydus_anchor.mount_point,
                               nydus_anchor.overlayfs)
    workload_gen.setup_workload_generator()

    assert workload_gen.verify_entire_fs()
    workload_gen.torture_read(5, 20)
    workload_gen.finish_torture_read()
Example #2
def put_files(dist: Distributor, f_type, count, size):
    """Example:
    depth: 4
    width: 4
    layers:
    - layer1:
        - size: 10KB
            type: regular
            count: 2000
        - size: 12MB
            type: regular
            count: 10
        - size: 90MB
            type: regular
            count: 1
        - type: symlink
            count: 100

    """

    logging.info("putting %s, count %d", f_type, count)
    if f_type == "regular":
        size_in_bytes = utils.parse_size(size)
        dist.put_multiple_files(count, Size(size_in_bytes))
    elif f_type == "dir":
        dist.put_directories(count)
    elif f_type == "symlink":
        dist.put_symlinks(count)
    elif f_type == "hardlink":
        dist.put_hardlinks(count)
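
The docstring above sketches a YAML layer spec for put_files, but not the driver that
consumes it. A minimal sketch of such a driver follows, assuming PyYAML is available
and that the spec lives in a file such as layer_spec.yml; both the file name and the
helper put_files_from_spec are illustrative, not part of the framework:

import yaml  # assumption: PyYAML is installed in the test environment


def put_files_from_spec(dist: Distributor, spec_path="layer_spec.yml"):
    # Load a spec shaped like the YAML example in put_files' docstring.
    with open(spec_path) as f:
        spec = yaml.safe_load(f)
    # `layers` is a list of single-key mappings, e.g. {"layer1": [<file groups>]}.
    for layer in spec["layers"]:
        for groups in layer.values():
            for group in groups:
                # `size` is only present for regular files; put_files ignores it otherwise.
                put_files(dist, group["type"], group["count"], group.get("size"))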
Example #3
def test_build_image(nydus_anchor, nydus_scratch_image: RafsImage,
                     rafs_conf: RafsConf):
    """
    title: Build nydus image
    description: Build a nydus image from a rootfs, generating a proper bootstrap
    and blob
    pass_criteria:
      - Image can be successfully built and mounted
      - Rafs can be unmounted after a small amount of read IO and attr
        operations
      - Try letting the image builder upload the blob itself.
    """

    dist = Distributor(nydus_scratch_image.rootfs(), 80, 1)
    dist.generate_tree()
    dist.put_directories(100)
    dist.put_hardlinks(90)
    dist.put_symlinks(200)
    dist.put_multiple_files(random.randint(20, 28), Size(10, Unit.MB))
    dist.put_multiple_chinese_files(random.randint(20, 28), Size(20, Unit.KB))

    Whiteout().whiteout_one_file(nydus_scratch_image.rootfs(),
                                 "i/am/troublemaker/foo")

    nydus_scratch_image.set_backend(Backend.BACKEND_PROXY).create_image()

    rafs_conf.set_rafs_backend(backend_type=Backend.BACKEND_PROXY)
    rafs_conf.dump_rafs_conf()

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    wg = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())

    rafs.mount()
    assert wg.verify_entire_fs()
    rafs.umount()
Example #4
def test_prefetch_with_cache(
    nydus_anchor,
    nydus_scratch_image: RafsImage,
    rafs_conf: RafsConf,
    thread_cnt,
    compressor,
    is_cache_compressed,
):
    """
    title: Prefetch from various backend
    description:
      - Enable rafs backend blob cache, as it is disabled by default
    pass_criteria:
      - Rafs can be mounted.
      - Rafs can be unmounted.
    """
    rafs_conf.enable_rafs_blobcache(is_compressed=is_cache_compressed)
    rafs_conf.set_rafs_backend(Backend.BACKEND_PROXY, prefix="object_prefix/")
    rafs_conf.enable_fs_prefetch(threads_count=4,
                                 bandwidth_rate=Size(40, Unit.MB).B)
    rafs_conf.dump_rafs_conf()

    dist = Distributor(nydus_scratch_image.rootfs(), 4, 4)
    dist.generate_tree()
    dist.put_directories(20)
    dist.put_multiple_files(40, Size(3, Unit.MB))
    dist.put_hardlinks(6)
    dist.put_multiple_chinese_files(random.randint(20, 28), Size(20, Unit.KB))

    nydus_scratch_image.set_backend(Backend.BACKEND_PROXY,
                                    prefix="object_prefix/").create_image(
                                        compressor=compressor,
                                        readahead_policy="fs",
                                        readahead_files="/".encode(),
                                    )

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.thread_num(4).mount()

    nc = NydusAPIClient(rafs.get_apisock())
    workload_gen = WorkloadGen(nydus_anchor.mount_point,
                               nydus_scratch_image.rootfs())
    # Give prefetch a moment to start, then verify it actually transferred data.
    time.sleep(0.3)
    m = nc.get_blobcache_metrics()
    assert m["prefetch_data_amount"] != 0

    workload_gen.setup_workload_generator()
    workload_gen.torture_read(thread_cnt, 10)

    assert NydusAnchor.check_nydusd_health()

    workload_gen.finish_torture_read()
    assert not workload_gen.io_error

    # In this way, we can check if nydusd is crashed.
    assert rafs.is_mounted()
    rafs.umount()
Example #5
def test_blob_prefetch(nydus_anchor: NydusAnchor,
                       nydus_scratch_image: RafsImage, readahead_policy):
    """
    description:
        For rafs, there are two types of prefetching:
        1. Prefetch from the fs layer, meaning each hinted file is prefetched one by one.
        2. Prefetch directly from the backend/blob layer, meaning a range is fetched from the blob.
    """
    # Delete any access log first; if one is present, bootstrap blob prefetch won't work.
    utils.execute("rm -rf *.access", shell=True)

    dist = Distributor(nydus_scratch_image.rootfs(), 8, 2)
    dist.generate_tree()
    dist.put_directories(20)
    dist.put_multiple_files(100, Size(64, Unit.KB))
    dist.put_symlinks(30)
    dist.put_hardlinks(20)
    dist.put_multiple_files(40, Size(64, Unit.KB))

    utils.clean_pagecache()

    hint_files = dist.files[-40:]
    hint_files.extend(dist.symlinks[-20:])

    hint_files = [os.path.join("/", p) for p in hint_files]
    hint_files = "\n".join(hint_files)

    nydus_scratch_image.set_backend(Backend.LOCALFS).create_image(
        readahead_policy=readahead_policy,
        readahead_files=hint_files.encode(),
    )

    rafs_conf = RafsConf(nydus_anchor, nydus_scratch_image)
    rafs_conf.set_rafs_backend(Backend.LOCALFS, image=nydus_scratch_image)
    rafs_conf.enable_records_readahead(interval=1)
    rafs_conf.dump_rafs_conf()

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    with utils.timer("Mount elapse"):
        rafs.thread_num(7).mount()
    assert rafs.is_mounted()

    wg = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())

    # TODO: Run several parallel read workers against the mount_point
    wg.setup_workload_generator()
    wg.torture_read(5, 5)
    wg.finish_torture_read()

    utils.clean_pagecache()
Example #6
def test_prefetch_without_cache(nydus_anchor: NydusAnchor,
                                nydus_scratch_image: RafsImage,
                                rafs_conf: RafsConf):
    """Files prefetch test

    1. relative hinted prefetch files
    2. absolute hinted prefetch files
    3. source rootfs root dir.
    """

    rafs_conf.enable_fs_prefetch().set_rafs_backend(Backend.BACKEND_PROXY)
    rafs_conf.dump_rafs_conf()

    dist = Distributor(nydus_scratch_image.rootfs(), 4, 4)
    dist.generate_tree()
    dist.put_directories(20)
    dist.put_multiple_files(40, Size(8, Unit.KB))
    dist.put_hardlinks(6)
    dist.put_multiple_chinese_files(random.randint(20, 28), Size(20, Unit.KB))

    hint_files = ["/"]
    hint_files.extend(dist.files)
    hint_files.extend(dist.dirs)
    hint_files.extend(dist.symlinks)
    hint_files.extend(dist.hardlinks)

    hint_files = [os.path.join("/", p) for p in hint_files]
    hint_files = "\n".join(hint_files)

    nydus_scratch_image.set_backend(Backend.BACKEND_PROXY).create_image(
        readahead_policy="fs", readahead_files=hint_files.encode())

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.mount()
    assert rafs.is_mounted()

    workload_gen = WorkloadGen(nydus_anchor.mount_point,
                               nydus_scratch_image.rootfs())

    # TODO: Run several parallel read workers against the mount_point
    workload_gen.setup_workload_generator()
    workload_gen.torture_read(8, 5)
    workload_gen.finish_torture_read()

    assert NydusAnchor.check_nydusd_health()
    assert not workload_gen.io_error

    assert rafs.is_mounted()
    rafs.umount()
Example #7
def test_specified_prefetch(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    nydus_scratch_image: RafsImage,
    backend,
):
    """
    description:
        Nydusd can be started with a list of files and directories.
        It then prefetches those entries from the backend according to the list.
    """

    rafs_conf.set_rafs_backend(backend)
    rafs_conf.enable_fs_prefetch(prefetch_all=True)
    rafs_conf.enable_rafs_blobcache()
    rafs_conf.dump_rafs_conf()

    dist = Distributor(nydus_scratch_image.rootfs(), 8, 2)
    dist.generate_tree()
    dirs = dist.put_directories(20)
    dist.put_multiple_files(100, Size(64, Unit.KB))
    dist.put_symlinks(30)
    dist.put_hardlinks(20)
    dist.put_multiple_files(40, Size(64, Unit.KB))
    dist.put_single_file(Size(3, Unit.MB), name="test")

    nydus_scratch_image.set_backend(backend).create_image()

    prefetching_files = dirs
    prefetching_files += dist.files[:-10]
    prefetching_files += dist.dirs[:-5]
    prefetching_files += dist.symlinks[:-10]
    # Fuzz
    prefetching_files.append("/a/b/c/d")
    prefetching_files.append(os.path.join("/", "f/g/h/"))

    specified_dirs = " ".join(
        [os.path.join("/", d) for d in prefetching_files])

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.prefetch_files(specified_dirs).mount()
    wg = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())

    nc = NydusAPIClient(rafs.get_apisock())
    wg.setup_workload_generator()
    blobcache_metrics = nc.get_blobcache_metrics()
    wg.torture_read(5, 10)

    # Wait until all prefetch workers have drained.
    while blobcache_metrics["prefetch_workers"] != 0:
        time.sleep(0.5)
        blobcache_metrics = nc.get_blobcache_metrics()

    # With prefetch done, the ongoing torture reads should be served from the
    # blobcache, so the backend read counter must stay flat.
    begin = nc.get_backend_metrics()["read_amount_total"]
    time.sleep(1)
    end = nc.get_backend_metrics()["read_amount_total"]

    assert end == begin
    wg.finish_torture_read()
Example #8
def test_blobcache_recovery(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    nydus_scratch_image: RafsImage,
):
    rafs_conf.set_rafs_backend(Backend.BACKEND_PROXY)
    rafs_conf.enable_fs_prefetch()
    rafs_conf.enable_rafs_blobcache()
    rafs_conf.dump_rafs_conf()

    dist = Distributor(nydus_scratch_image.rootfs(), 8, 2)
    dist.generate_tree()
    dirs = dist.put_directories(20)
    dist.put_multiple_files(100, Size(64, Unit.KB))
    dist.put_symlinks(30)
    dist.put_hardlinks(20)
    dist.put_multiple_files(40, Size(64, Unit.KB))
    dist.put_single_file(Size(3, Unit.MB), name="test")

    nydus_scratch_image.set_backend(Backend.BACKEND_PROXY).create_image()

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.prefetch_files("/").mount()
    wg = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())

    wg.setup_workload_generator()
    wg.torture_read(4, 4)

    # Hopefully, prefetch can be done within 5 seconds.
    time.sleep(5)

    wg.finish_torture_read()
    rafs.umount()

    # Remount the same image: the blobcache populated by the first mount is
    # recovered, so this instance should not need to read from the backend at all.
    rafs2 = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs2.mount()

    wg.torture_read(4, 4)
    time.sleep(0.5)

    nc = NydusAPIClient(rafs2.get_apisock())

    begin = nc.get_backend_metrics()["read_amount_total"]
    time.sleep(1)
    end = nc.get_backend_metrics()["read_amount_total"]

    assert end == begin == 0

    wg.finish_torture_read()
Example #9
def test_prefetch_with_cache(
    nydus_anchor,
    nydus_scratch_image: RafsImage,
    rafs_conf: RafsConf,
    thread_cnt,
    compressor,
    is_cache_compressed,
    converter,
    items,
):
    """
    title: Prefetch from various backend
    description:
      - Enable rafs backend blob cache, as it is disabled by default
    pass_criteria:
      - Rafs can be mounted.
      - Rafs can be unmounted.
    """

    dist = Distributor(nydus_scratch_image.rootfs(), 4, 4)
    dist.generate_tree()
    dist.put_directories(20)
    dist.put_multiple_files(40, Size(3, Unit.MB))
    dist.put_multiple_files(10, Size(5, Unit.MB))
    dist.put_hardlinks(6)
    dist.put_multiple_chinese_files(random.randint(20, 28), Size(20, Unit.KB))

    nydus_scratch_image.set_backend(Backend.LOCALFS).create_image(
        image_bin=converter,
        compressor=compressor,
        readahead_policy="fs",
        readahead_files="/".encode(),
    )

    rafs_conf.enable_rafs_blobcache(
        is_compressed=is_cache_compressed).enable_fs_prefetch()
    rafs_conf.set_rafs_backend(Backend.LOCALFS, image=nydus_scratch_image)

    # Each entry in `items` names a RafsConf method (e.g. an extra enable_* switch);
    # look it up on the class and invoke it against this configuration.
    if len(items) > 0:
        for i in items:
            item = RafsConf.__dict__[i]
            item(rafs_conf)

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.thread_num(6).mount()

    nc = NydusAPIClient(rafs.get_apisock())
    workload_gen = WorkloadGen(nydus_anchor.mount_point,
                               nydus_scratch_image.rootfs())
    # Give prefetch a moment to start, then verify it actually transferred data.
    time.sleep(0.3)
    m = nc.get_blobcache_metrics()
    assert m["prefetch_data_amount"] != 0

    workload_gen.verify_entire_fs()

    workload_gen.setup_workload_generator()
    workload_gen.torture_read(thread_cnt, 6)

    assert NydusAnchor.check_nydusd_health()

    workload_gen.finish_torture_read()
    assert not workload_gen.io_error
Example #10
def test_stargz(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    nydus_scratch_image: RafsImage,
):
    """
    Example command:
        stargzify file:`pwd`/foo.tar.gz foo.stargz

    """
    intermediator = "tmp.tar.gz"
    stargz_image = "tmp.stargz"

    dist = Distributor(nydus_scratch_image.rootfs(), 4, 4)
    dist.generate_tree()
    dirs = dist.put_directories(20)
    dist.put_multiple_files(100, Size(64, Unit.KB))
    dist.put_symlinks(30)
    dist.put_multiple_files(10, Size(4, Unit.MB))
    dist.put_hardlinks(20)
    dist.put_single_file(Size(3, Unit.MB), name="test")
    try:
        shutil.rmtree("origin")
    except Exception:
        pass
    shutil.copytree(nydus_scratch_image.rootfs(), "origin", symlinks=True)
    utils.write_tar_gz(nydus_scratch_image.rootfs(), intermediator)

    # Convert the tar.gz into a stargz image with the bundled stargzify tool;
    # its TOC is parsed below to drive the rafs image build.
    cmd = ["framework/bin/stargzify", f"file:{intermediator}", stargz_image]
    utils.execute(cmd)

    toc = utils.parse_stargz(stargz_image)
    image = RafsImage(
        nydus_anchor,
        toc,
        "bootstrap_scratched",
        "blob_scratched",
        clear_from_oss=True,
    )

    # This is a trick: the blob path normally points at a temp file created when
    # RafsImage is instantiated. The framework will upload the stargz image to OSS.
    image.blob_abs_path = stargz_image
    image.set_backend(Backend.OSS).set_param(
        "blob-id", uuid.uuid4()).create_image(from_stargz=True)

    rafs_conf.set_rafs_backend(Backend.OSS)
    rafs_conf.enable_rafs_blobcache(is_compressed=True)

    rafs = RafsMount(nydus_anchor, image, rafs_conf)
    rafs.mount()

    wg = WorkloadGen(nydus_anchor.mount_point, "origin")

    wg.verify_entire_fs()

    wg.setup_workload_generator()
    wg.torture_read(4, 4)

    wg.finish_torture_read()
    assert not wg.io_error
Example #11
def test_whiteout(nydus_anchor, rafs_conf, whiteout_spec):
    _td_1 = tempfile.TemporaryDirectory(dir=nydus_anchor.workspace)
    _td_2 = tempfile.TemporaryDirectory(dir=nydus_anchor.workspace)
    parent_rootfs = _td_1.name
    upper_rootfs = _td_2.name

    whiteout = Whiteout(whiteout_spec)

    parent_image = RafsImage(nydus_anchor, parent_rootfs, "parent_bs",
                             "parent_blob")

    dist_parent = Distributor(parent_rootfs, 6, 4)
    dist_parent.generate_tree()
    dist_parent.put_directories(20)
    dist_parent.put_multiple_files(50, Size(32, Unit.KB))
    dist_parent.put_symlinks(30)
    dist_parent.put_hardlinks(20)

    to_be_removed = dist_parent.put_single_file(Size(7, Unit.KB))

    layered_image = RafsImage(nydus_anchor, upper_rootfs, "bs", "blob")

    dist_upper = Distributor(upper_rootfs, 3, 5)
    dist_upper.generate_tree()
    dist_upper.put_multiple_files(27, Size(3, Unit.MB))
    dist_upper.put_symlinks(5)

    # `to_be_removed` should look like `a/b/c`
    whiteout.whiteout_one_file(upper_rootfs, to_be_removed)
    # Put a whiteout file that does not hide any file from lower layer
    whiteout.whiteout_one_file(upper_rootfs, "i/am/troublemaker/foo")

    dir_to_be_whiteout_opaque = dist_parent.dirs[randint(
        0,
        len(dist_parent.dirs) - 1)]
    # `dir_to_be_whiteout_opaque` should look like `a/b/c`
    whiteout.whiteout_opaque_directory(upper_rootfs, dir_to_be_whiteout_opaque)

    dist_parent.put_directories(1)
    dir_to_be_removed = dist_parent.dirs[-1]
    whiteout.whiteout_one_dir(upper_rootfs, dir_to_be_removed)

    parent_image.set_backend(Backend.OSS).create_image()
    layered_image.set_backend(
        Backend.OSS).whiteout_spec(whiteout_spec).create_image(
            parent_image=parent_image)

    rafs_conf.set_rafs_backend(Backend.OSS)

    nydus_anchor.mount_overlayfs(
        [layered_image.rootfs(), parent_image.rootfs()])
    rafs = RafsMount(nydus_anchor, layered_image, rafs_conf)
    rafs.mount()

    wg = WorkloadGen(nydus_anchor.mount_point, nydus_anchor.overlayfs)

    assert not os.path.exists(
        os.path.join(nydus_anchor.mount_point, to_be_removed))
    assert not os.path.exists(
        os.path.join(nydus_anchor.mount_point, dir_to_be_removed))

    files_under_opaque_dir = os.listdir(
        os.path.join(nydus_anchor.mount_point, dir_to_be_whiteout_opaque))

    # If the opaque dir has files, only files from the lower layer will be hidden.
    if len(files_under_opaque_dir) != 0:
        upper_files = os.listdir(
            os.path.join(upper_rootfs, dir_to_be_whiteout_opaque))
        for f in files_under_opaque_dir:
            assert f in upper_files

    assert wg.verify_entire_fs()