Example #1
def test_blobcache_recovery(
    nydus_anchor: NydusAnchor,
    rafs_conf: RafsConf,
    nydus_scratch_image: RafsImage,
):
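    """Verify that the blob cache populated by a prefetch-enabled mount
    survives umount: a second mount of the same image should serve all reads
    from the local cache, leaving the backend read_amount_total at zero."""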
    rafs_conf.set_rafs_backend(Backend.BACKEND_PROXY)
    rafs_conf.enable_fs_prefetch()
    rafs_conf.enable_rafs_blobcache()
    rafs_conf.dump_rafs_conf()

    dist = Distributor(nydus_scratch_image.rootfs(), 8, 2)
    dist.generate_tree()
    dirs = dist.put_directories(20)
    dist.put_multiple_files(100, Size(64, Unit.KB))
    dist.put_symlinks(30)
    dist.put_hardlinks(20)
    dist.put_multiple_files(40, Size(64, Unit.KB))
    dist.put_single_file(Size(3, Unit.MB), name="test")

    nydus_scratch_image.set_backend(Backend.BACKEND_PROXY).create_image()

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.prefetch_files("/").mount()
    wg = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())

    wg.setup_workload_generator()
    wg.torture_read(4, 4)

    # Hopefully, prefetch can finish within 5 seconds.
    time.sleep(5)

    wg.finish_torture_read()
    rafs.umount()

    rafs2 = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs2.mount()

    wg.torture_read(4, 4)
    time.sleep(0.5)

    nc = NydusAPIClient(rafs2.get_apisock())

    begin = nc.get_backend_metrics()["read_amount_total"]
    time.sleep(1)
    end = nc.get_backend_metrics()["read_amount_total"]

    assert end == begin == 0

    wg.finish_torture_read()
Example #2
def test_global_metrics(nydus_anchor, nydus_image: RafsImage,
                        rafs_conf: RafsConf):
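    """Check that global and per-file iostats are exported through the API
    socket and that the read counters grow as the workload issues reads."""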
    rafs_id = "/"

    rafs_conf.enable_files_iostats().set_rafs_backend(Backend.OSS)
    nydus_image.set_backend(Backend.OSS).create_image()

    rafs = RafsMount(nydus_anchor, nydus_image, rafs_conf)
    rafs.mount()

    nc = NydusAPIClient(rafs.get_apisock())

    gm = nc.get_global_metrics()
    assert gm["files_account_enabled"] is True
    assert gm["measure_latency"] is True

    file_counters = nc.get_files_metrics(rafs_id)
    assert len(file_counters)

    logging.info("There are %d file counters created.", len(file_counters))

    wg = WorkloadGen(nydus_anchor.mount_point, nydus_image.rootfs())
    wg.setup_workload_generator()

    wg.io_read(4)

    file_counters = nc.get_files_metrics(rafs_id)
    assert file_counters is not None and len(file_counters)
    logging.info("There are %d file counters created after some read.",
                 len(file_counters))

    if len(file_counters):
        k = random.choice(list(file_counters))
        logging.info("ino: %s, stats: %s", k, file_counters[k])

    gm_old = nc.get_global_metrics()

    wg.io_read(4)

    gm_new = nc.get_global_metrics()
    assert gm_new["data_read"] > gm_old["data_read"]
    assert (gm_new["fop_hits"][nydusd_client.Fop.Read.get_value()] >
            gm_old["fop_hits"][nydusd_client.Fop.Read.get_value()])

    rafs.umount()
Example #3
def test_iostats(nydus_anchor: NydusAnchor, nydus_image: RafsImage,
                 rafs_conf: RafsConf):
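    """Poll global, per-file and backend metrics while torture reads are in
    flight, and exercise the latest-read-files endpoint."""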
    rafs_id = "/"
    rafs_conf.enable_files_iostats().enable_latest_read_files(
    ).set_rafs_backend(Backend.OSS)
    nydus_image.set_backend(Backend.OSS).create_image()
    rafs = RafsMount(nydus_anchor, nydus_image, rafs_conf)

    rafs.mount()
    assert rafs.is_mounted()

    nc = NydusAPIClient(rafs.get_apisock())

    duration = 5

    wg = WorkloadGen(nydus_anchor.mount_point, nydus_image.rootfs())
    wg.setup_workload_generator()
    wg.torture_read(4, duration)

    while duration:
        time.sleep(1)
        duration -= 1
        nc.get_global_metrics()
        nc.get_files_metrics(rafs_id)
        nc.get_backend_metrics(rafs_id)

    wg.finish_torture_read()

    duration = 7
    wg.torture_read(4, duration)
    # Disable files metrics first and then re-enable them.
    # TODO: files metrics can't be toggled dynamically yet. Implement it later.
    # nc.disable_files_metrics(rafs_id)
    # nc.enable_files_metrics(rafs_id)

    r = nc.get_latest_files_metrics(rafs_id)
    print(r)

    while duration:
        time.sleep(1)
        duration -= 1
        nc.get_files_metrics(rafs_id)

    wg.finish_torture_read()
    rafs.umount()
Example #4
def test_detect_io_hang(nydus_anchor, nydus_image: RafsImage,
                        rafs_conf: RafsConf):
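    """Repeatedly sample inflight FUSE operation metrics under read load, so a
    hung request would keep showing up in the reported operations."""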

    rafs_conf.enable_files_iostats().set_rafs_backend(Backend.OSS)
    rafs_conf.dump_rafs_conf()

    nydus_image.set_backend(Backend.OSS).create_image()

    rafs = RafsMount(nydus_anchor, nydus_image, rafs_conf)
    rafs.thread_num(5).mount()

    wg = WorkloadGen(nydus_anchor.mount_point, nydus_image.rootfs())
    wg.setup_workload_generator()
    wg.torture_read(4, 8)

    nc = NydusAPIClient(rafs.get_apisock())

    # Sample the inflight operation metrics every 100ms while reads are running.
    for _ in range(30):
        ops = nc.get_inflight_metrics()
        time.sleep(0.1)
        print(ops)

    wg.finish_torture_read()
Example #5
def test_backend_swap(nydus_anchor, nydus_scratch_image: RafsImage,
                      rafs_conf: RafsConf):
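    """Mount an image through the pseudo-fs API and repeatedly swap its
    backend between an OSS-backed and a localfs-backed bootstrap while reads
    are running, expecting no I/O errors."""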

    dist = Distributor(nydus_scratch_image.rootfs(), 5, 4)
    dist.generate_tree()
    dist.put_multiple_files(100, Size(2, Unit.MB))

    nydus_scratch_image.set_backend(Backend.OSS).create_image(
        readahead_policy="fs", readahead_files="/".encode())
    rafs_conf.set_rafs_backend(
        Backend.OSS).enable_rafs_blobcache().enable_fs_prefetch(
            threads_count=7, bandwidth_rate=Size(2, Unit.MB).B)
    rafs_conf.dump_rafs_conf()

    rafs = RafsMount(nydus_anchor, None, rafs_conf, with_defaults=False)
    rafs.thread_num(4).set_mountpoint(
        nydus_anchor.mount_point).apisock("api_sock").mount()

    nc = NydusAPIClient(rafs.get_apisock())
    nc.pseudo_fs_mount(nydus_scratch_image.bootstrap_path, "/",
                       rafs_conf.path(), None)
    nc.umount_rafs("/")
    assert len(os.listdir(nydus_anchor.mount_point)) == 0

    mp = "/pseudo1"
    nc.pseudo_fs_mount(nydus_scratch_image.bootstrap_path, mp,
                       rafs_conf.path(), None)

    rafs_conf_2nd = RafsConf(nydus_anchor, nydus_scratch_image)
    rafs_conf_2nd.set_rafs_backend(
        Backend.LOCALFS,
        image=nydus_scratch_image).enable_rafs_blobcache().enable_fs_prefetch(
            threads_count=3, bandwidth_rate=Size(1, Unit.MB).B)
    rafs_conf_2nd.dump_rafs_conf()

    new_image = (RafsImage(
        nydus_anchor, nydus_scratch_image.rootfs()).set_backend(
            Backend.LOCALFS).create_image(readahead_policy="fs",
                                          readahead_files="/".encode()))

    # TODO: At one point more than one fd was left open here. Check why that happens.
    wg = WorkloadGen(
        os.path.join(nydus_anchor.mount_point, mp.strip("/")),
        nydus_scratch_image.rootfs(),
    )

    wg.setup_workload_generator()
    wg.torture_read(8, 8)

    for i in range(1, 50):
        logging.debug("swap for the %dth time", i)
        nc.swap_backend(mp, new_image.bootstrap_name, rafs_conf_2nd.path())
        # assert nc.get_blobcache_metrics(mp)["prefetch_workers"] == 3
        time.sleep(0.2)
        nc.swap_backend(mp, nydus_scratch_image.bootstrap_name,
                        rafs_conf.path())
        utils.clean_pagecache()

    wg.finish_torture_read()

    assert not wg.io_error

    nc.umount_rafs(mp)
    utils.clean_pagecache()
Example #6
def test_pseudo_fs(nydus_anchor, nydus_image, rafs_conf: RafsConf):
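    """Mount three separately built images with different cache and validation
    options under a single daemon's pseudo fs, run concurrent reads against
    each, then unmount them one by one."""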
    nydus_image.set_backend(Backend.BACKEND_PROXY).create_image()

    rafs_conf.set_rafs_backend(Backend.BACKEND_PROXY)

    rafs = RafsMount(nydus_anchor, None, rafs_conf)
    rafs.mount()
    time.sleep(1)
    nc = NydusAPIClient(rafs.get_apisock())

    try:
        shutil.rmtree("pseudo_fs_scratch")
    except FileNotFoundError:
        pass

    scratch_rootfs = shutil.copytree(nydus_image.rootfs(),
                                     "pseudo_fs_scratch",
                                     symlinks=True)
    dist = Distributor(scratch_rootfs, 5, 5)
    dist.generate_tree()
    dist.put_multiple_files(20, Size(8, Unit.KB))

    ###
    suffix = "1"
    image = RafsImage(
        nydus_anchor,
        scratch_rootfs,
        "bs" + suffix,
        "blob" + suffix,
    )
    conf = RafsConf(nydus_anchor)
    conf.enable_fs_prefetch()
    conf.enable_validation()
    conf.set_rafs_backend(Backend.BACKEND_PROXY)
    conf.dump_rafs_conf()

    image.set_backend(Backend.BACKEND_PROXY).create_image()
    nc.pseudo_fs_mount(image.bootstrap_path, f"/pseudo{suffix}", conf.path(),
                       None)
    ###
    suffix = "2"
    image = RafsImage(
        nydus_anchor,
        scratch_rootfs,
        "bs" + suffix,
        "blob" + suffix,
    )
    conf = RafsConf(nydus_anchor)
    conf.enable_rafs_blobcache()
    conf.enable_validation()
    conf.enable_records_readahead()
    conf.set_rafs_backend(Backend.BACKEND_PROXY)
    conf.dump_rafs_conf()

    dist.put_multiple_files(20, Size(8, Unit.KB))

    image.set_backend(Backend.BACKEND_PROXY).create_image()
    nc.pseudo_fs_mount(image.bootstrap_path, f"/pseudo{suffix}", conf.path(),
                       None)
    ###
    suffix = "3"
    image = RafsImage(
        nydus_anchor,
        scratch_rootfs,
        "bs" + suffix,
        "blob" + suffix,
    )
    conf = RafsConf(nydus_anchor)
    conf.enable_rafs_blobcache()
    conf.enable_records_readahead()
    conf.set_rafs_backend(Backend.BACKEND_PROXY)
    conf.dump_rafs_conf()

    dist.put_multiple_files(20, Size(8, Unit.KB))

    image.set_backend(Backend.BACKEND_PROXY).create_image()
    nc.pseudo_fs_mount(image.bootstrap_path, f"/pseudo{suffix}", conf.path(),
                       None)

    wg1 = WorkloadGen(os.path.join(nydus_anchor.mount_point, "pseudo1"),
                      scratch_rootfs)
    wg2 = WorkloadGen(os.path.join(nydus_anchor.mount_point, "pseudo2"),
                      scratch_rootfs)
    wg3 = WorkloadGen(os.path.join(nydus_anchor.mount_point, "pseudo3"),
                      scratch_rootfs)

    time.sleep(2)
    wg1.setup_workload_generator()
    wg2.setup_workload_generator()
    wg3.setup_workload_generator()

    wg1.torture_read(4, 8)
    wg2.torture_read(4, 8)
    wg3.torture_read(4, 8)

    wg1.finish_torture_read()
    wg2.finish_torture_read()
    wg3.finish_torture_read()

    # TODO: Verification is temporarily disabled because it is hard to select the `verify dir`.
    # assert wg1.verify_entire_fs()
    # assert wg2.verify_entire_fs()
    # assert wg3.verify_entire_fs()

    nc.umount_rafs("/pseudo1")
    nc.umount_rafs("/pseudo2")
    nc.umount_rafs("/pseudo3")
Example #7
def test_prefetch_with_cache(
    nydus_anchor,
    nydus_scratch_image: RafsImage,
    rafs_conf: RafsConf,
    thread_cnt,
    compressor,
    is_cache_compressed,
    converter,
    items,
):
    """
    title: Prefetch from various backends
    description:
      - Enable the rafs blob cache, since it is disabled by default
    pass_criteria:
      - Rafs can be mounted.
      - Rafs can be unmounted.
    """

    dist = Distributor(nydus_scratch_image.rootfs(), 4, 4)
    dist.generate_tree()
    dist.put_directories(20)
    dist.put_multiple_files(40, Size(3, Unit.MB))
    dist.put_multiple_files(10, Size(5, Unit.MB))
    dist.put_hardlinks(6)
    dist.put_multiple_chinese_files(random.randint(20, 28), Size(20, Unit.KB))

    nydus_scratch_image.set_backend(Backend.LOCALFS).create_image(
        image_bin=converter,
        compressor=compressor,
        readahead_policy="fs",
        readahead_files="/".encode(),
    )

    rafs_conf.enable_rafs_blobcache(
        is_compressed=is_cache_compressed).enable_fs_prefetch()
    rafs_conf.set_rafs_backend(Backend.LOCALFS, image=nydus_scratch_image)

    # Apply any extra RafsConf toggles passed in through parametrization,
    # looked up by method name on RafsConf.
    for i in items:
        item = RafsConf.__dict__[i]
        item(rafs_conf)

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.thread_num(6).mount()

    nc = NydusAPIClient(rafs.get_apisock())
    workload_gen = WorkloadGen(nydus_anchor.mount_point,
                               nydus_scratch_image.rootfs())
    # Give prefetch a moment to make progress before sampling blobcache
    # metrics, otherwise the counter may still be zero.
    time.sleep(0.3)
    m = nc.get_blobcache_metrics()
    assert m["prefetch_data_amount"] != 0

    workload_gen.verify_entire_fs()

    workload_gen.setup_workload_generator()
    workload_gen.torture_read(thread_cnt, 6)

    assert NydusAnchor.check_nydusd_health()

    workload_gen.finish_torture_read()
    assert not workload_gen.io_error
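
The examples above all follow the same skeleton: build a RafsConf, create a RafsImage against a backend, mount it with RafsMount, drive reads with WorkloadGen, and inspect counters through NydusAPIClient. The sketch below condenses that flow into a minimal, hypothetical smoke test; it reuses only fixtures and methods that already appear in the examples above and is not one of the source tests.

def test_minimal_smoke(nydus_anchor, nydus_image: RafsImage,
                       rafs_conf: RafsConf):
    # Hypothetical minimal test assembled from the helpers shown above;
    # the test name and parameters are illustrative only.
    rafs_conf.set_rafs_backend(Backend.BACKEND_PROXY)
    rafs_conf.enable_rafs_blobcache()
    rafs_conf.dump_rafs_conf()

    nydus_image.set_backend(Backend.BACKEND_PROXY).create_image()

    rafs = RafsMount(nydus_anchor, nydus_image, rafs_conf)
    rafs.mount()
    assert rafs.is_mounted()

    wg = WorkloadGen(nydus_anchor.mount_point, nydus_image.rootfs())
    wg.setup_workload_generator()
    wg.torture_read(2, 4)

    nc = NydusAPIClient(rafs.get_apisock())
    # data_read is non-negative and grows as the workload reads files.
    assert nc.get_global_metrics()["data_read"] >= 0

    wg.finish_torture_read()
    assert not wg.io_error
    rafs.umount()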