Example #1
def test_start_too_small_device(pyocf_ctx, mode, cls):
    """Starting cache with device below 100MiB
    Check if starting cache with device below minimum size is blocked
    """

    cache_device = Volume(Size.from_B(20 * 1024 * 1024 - 1))  # 1 byte short of 20 MiB

    with pytest.raises(OcfError, match="OCF_ERR_INVAL_CACHE_DEV"):
        Cache.start_on_device(cache_device,
                              cache_mode=mode,
                              cache_line_size=cls)
Example #2
def test_start_cache_same_id(pyocf_ctx, mode, cls):
    """Adding two caches with the same name
    Check that OCF does not allow for 2 caches to be started with the same cache_name
    """

    cache_device1 = Volume(Size.from_MiB(20))
    cache_device2 = Volume(Size.from_MiB(20))
    cache_name = "cache"
    cache = Cache.start_on_device(cache_device1,
                                  cache_mode=mode,
                                  cache_line_size=cls,
                                  name=cache_name)
    cache.get_stats()

    with pytest.raises(OcfError, match="OCF_ERR_CACHE_EXIST"):
        cache = Cache.start_on_device(cache_device2,
                                      cache_mode=mode,
                                      cache_line_size=cls,
                                      name=cache_name)
    cache.get_stats()
Example #3
def test_start_check_default(pyocf_ctx):
    """Test if default values are correct after start.
    """

    cache_device = Volume(Size.from_MiB(40))
    core_device = Volume(Size.from_MiB(10))
    cache = Cache.start_on_device(cache_device)

    core = Core.using_device(core_device)
    cache.add_core(core)

    # Check if values are default
    stats = cache.get_stats()
    assert stats["conf"]["cleaning_policy"] == CleaningPolicy.DEFAULT
    assert stats["conf"]["cache_mode"] == CacheMode.DEFAULT
    assert stats["conf"]["cache_line_size"] == CacheLineSize.DEFAULT
    assert stats["conf"]["eviction_policy"] == EvictionPolicy.DEFAULT

    core_stats = core.get_stats()
    assert core_stats["seq_cutoff_policy"] == SeqCutOffPolicy.DEFAULT
Example #4
def test_change_cache_mode(pyocf_ctx, from_cm, to_cm, cls):
    # Start cache device
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device,
                                  cache_mode=from_cm,
                                  cache_line_size=cls)

    # Change cache mode and check if stats are as expected
    cache.change_cache_mode(to_cm)
    stats_after = cache.get_stats()
    assert stats_after["conf"]["cache_mode"] == to_cm
Example #5
def test_add_remove_incrementally(pyocf_ctx, cache_mode, cls):
    # Start cache device
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device,
                                  cache_mode=cache_mode,
                                  cache_line_size=cls)
    core_devices = []
    core_amount = 5

    # Create 5 core devices and add to cache
    for i in range(0, core_amount):
        core_device = Volume(S.from_MiB(10))
        core = Core.using_device(core_device)
        core_devices.append(core)
        cache.add_core(core)

    # Check that core count is as expected
    stats = cache.get_stats()
    assert stats["conf"]["core_count"] == core_amount

    # Remove 3 cores
    cache.remove_core(core_devices[0])
    cache.remove_core(core_devices[1])
    cache.remove_core(core_devices[2])

    # Add 2 cores and check if core count is as expected
    cache.add_core(core_devices[0])
    cache.add_core(core_devices[1])
    stats = cache.get_stats()
    assert stats["conf"]["core_count"] == core_amount - 1

    # Remove 1 core and check if core count is as expected
    cache.remove_core(core_devices[1])
    stats = cache.get_stats()
    assert stats["conf"]["core_count"] == core_amount - 2

    # Add 2 cores and check if core count is as expected
    cache.add_core(core_devices[1])
    cache.add_core(core_devices[2])
    stats = cache.get_stats()
    assert stats["conf"]["core_count"] == core_amount
Example #6
def test_adding_core(pyocf_ctx, cache_mode, cls):
    # Start cache device
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device,
                                  cache_mode=cache_mode,
                                  cache_line_size=cls)

    # Create core device
    core_device = Volume(S.from_MiB(10))
    core = Core.using_device(core_device)

    # Check statistics before adding core
    stats = cache.get_stats()
    assert stats["conf"]["core_count"] == 0

    # Add core to cache
    cache.add_core(core)

    # Check statistics after adding core
    stats = cache.get_stats()
    assert stats["conf"]["core_count"] == 1
Example #7
def test_start_cache_same_device(pyocf_ctx, mode, cls):
    """Adding two caches using the same cache device
    Check that OCF does not allow for 2 caches using the same cache device to be started
    """

    cache_device = Volume(Size.from_MiB(20))
    cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
    cache.get_stats()

    with pytest.raises(OcfError, match="OCF_ERR_NOT_OPEN_EXC"):
        cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
    cache.get_stats()
Example #8
def test_adding_core_twice(pyocf_ctx, cache_mode, cls):
    # Start cache device
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device,
                                  cache_mode=cache_mode,
                                  cache_line_size=cls)

    # Create core device
    core_device = Volume(S.from_MiB(10))
    core = Core.using_device(core_device)

    # Add core
    cache.add_core(core)

    # Check that it is not possible to add the same core again
    with pytest.raises(OcfError):
        cache.add_core(core)

    # Check that core count is still equal to one
    stats = cache.get_stats()
    assert stats["conf"]["core_count"] == 1
Example #9
def test_start_read_first_and_check_mode(pyocf_ctx, mode: CacheMode,
                                         cls: CacheLineSize):
    """Starting cache in different modes with different cache line sizes.
    After start check proper cache mode behaviour, starting with read operation.
    """

    cache_device = Volume(Size.from_MiB(20))
    core_device = Volume(Size.from_MiB(5))
    cache = Cache.start_on_device(cache_device,
                                  cache_mode=mode,
                                  cache_line_size=cls)
    core_exported = Core.using_device(core_device)

    cache.add_core(core_exported)

    logger.info("[STAGE] Initial write to core device")
    test_data = Data.from_string("This is test data")
    io_to_core(core_exported, test_data, 20, True)

    cache_device.reset_stats()
    core_device.reset_stats()

    logger.info("[STAGE] Initial read from exported object")
    io_from_exported_object(core_exported, test_data.size, 20)
    check_stats_read_empty(core_exported, mode, cls)

    logger.info("[STAGE] Write to exported object after initial read")
    cache_device.reset_stats()
    core_device.reset_stats()

    test_data = Data.from_string("Changed test data")

    io_to_core(core_exported, test_data, 20)
    check_stats_write_after_read(core_exported, mode, cls, True)

    logger.info("[STAGE] Read from exported object after write")
    io_from_exported_object(core_exported, test_data.size, 20)
    check_stats_read_after_write(core_exported, mode, cls)

    check_md5_sums(core_exported, mode)
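The helpers io_to_core() and io_from_exported_object() are not defined in this excerpt. Below is a minimal sketch consistent with the call sites above, reusing the new_io()/OcfCompletion pattern from the other examples; the new_core_io constructor (direct IO to the backing core device) and the core.cache back-reference are assumptions:

def io_to_core(exported_obj, data, offset, to_core_device=False):
    # Write @data at @offset, either through the exported object or,
    # when to_core_device is set, directly to the backing core device.
    new_io = exported_obj.new_core_io if to_core_device else exported_obj.new_io
    io = new_io(exported_obj.cache.get_default_queue(), offset, data.size,
                IoDir.WRITE, 0, 0)
    io.set_data(data)
    completion = OcfCompletion([("err", c_int)])
    io.callback = completion.callback
    io.submit()
    completion.wait()
    assert completion.results["err"] == 0, "IO to core completion"


def io_from_exported_object(exported_obj, buffer_size, offset):
    # Read @buffer_size bytes at @offset from the exported object.
    read_buffer = Data(buffer_size)
    io = exported_obj.new_io(exported_obj.cache.get_default_queue(), offset,
                             read_buffer.size, IoDir.READ, 0, 0)
    io.set_data(read_buffer)
    completion = OcfCompletion([("err", c_int)])
    io.callback = completion.callback
    io.submit()
    completion.wait()
    assert completion.results["err"] == 0, "IO from exported object completion"
    return read_buffer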
Example #10
def test_core_change_seq_cut_off_policy(pyocf_ctx, cm, cls):
    # Start cache device
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(
        cache_device, cache_mode=cm, cache_line_size=cls
    )

    # Create 2 core devices
    core_device1 = Volume(S.from_MiB(10))
    core1 = Core.using_device(core_device1, name="core1")
    core_device2 = Volume(S.from_MiB(10))
    core2 = Core.using_device(core_device2, name="core2")

    # Add cores
    cache.add_core(core1)
    cache.add_core(core2)

    # Check all possible seq cut off policy switches for first core
    for seq_from in SeqCutOffPolicy:
        for seq_to in SeqCutOffPolicy:
            core1.set_seq_cut_off_policy(seq_from.value)

            # Check if seq cut off policy of the first core is correct
            stats = core1.get_stats()
            assert stats["seq_cutoff_policy"] == seq_from.value

            # Check if seq cut off policy of the second core did not change
            stats = core2.get_stats()
            assert stats["seq_cutoff_policy"] == SeqCutOffPolicy.DEFAULT

            core1.set_seq_cut_off_policy(seq_to.value)

            # Check if seq cut off policy of the first core is correct
            stats = core1.get_stats()
            assert stats["seq_cutoff_policy"] == seq_to.value

            # Check if seq cut off policy of the second core did not change
            stats = core2.get_stats()
            assert stats["seq_cutoff_policy"] == SeqCutOffPolicy.DEFAULT
Example #11
def test_fuzzy_start_name(pyocf_ctx, string_randomize, cm, cls):
    """
    Test whether it is possible to start a cache with various cache name values.
    :param pyocf_ctx: basic pyocf context fixture
    :param string_randomize: fuzzed cache name value to start cache with
    :param cm: cache mode value to start cache with
    :param cls: cache line size value to start cache with
    """
    cache_device = Volume(Size.from_MiB(30))
    try:
        cache = Cache.start_on_device(cache_device, name=string_randomize,
                                      cache_mode=cm, cache_line_size=cls)
    except OcfError:
        logger.error(f"Cache did not start properly with correct name value: {string_randomize}")
        return
    cache.stop()
Example #12
def test_start_stop_incrementally(pyocf_ctx):
    """Starting/stopping multiple caches incrementally.
    Check whether OCF behaves correctly when few caches at a time are
    in turns added and removed (#added > #removed) until their number reaches limit,
    and then proportions are reversed and number of caches gradually falls to 0.
    """

    counter = count()
    caches = []
    caches_limit = 10
    add = True
    run = True
    increase = True
    while run:
        if add:
            for i in range(0,
                           randrange(3, 5) if increase else randrange(1, 3)):
                cache_device = Volume(Size.from_MiB(20))
                cache_name = f"cache{next(counter)}"
                cache_mode = CacheMode(randrange(0, len(CacheMode)))
                size = 4096 * 2**randrange(0, len(CacheLineSize))
                cache_line_size = CacheLineSize(size)

                cache = Cache.start_on_device(cache_device,
                                              name=cache_name,
                                              cache_mode=cache_mode,
                                              cache_line_size=cache_line_size)
                caches.append(cache)
                stats = cache.get_stats()
                assert stats["conf"]["cache_mode"] == cache_mode, "Cache mode"
                assert stats["conf"][
                    "cache_line_size"] == cache_line_size, "Cache line size"
                assert stats["conf"]["cache_name"] == cache_name, "Cache name"
                if len(caches) == caches_limit:
                    increase = False
        else:
            for i in range(0,
                           randrange(1, 3) if increase else randrange(3, 5)):
                if len(caches) == 0:
                    run = False
                    break
                cache = caches.pop()
                logger.info("Getting stats before stopping cache")
                stats = cache.get_stats()
                cache_name = stats["conf"]["cache_name"]
                cache.stop()
                assert get_cache_by_name(pyocf_ctx, cache_name) != 0, \
                    "Try getting cache after stopping it"
        add = not add
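get_cache_by_name() is also not defined here; the assertion expects it to return OCF's lookup status code, so a nonzero value after cache.stop() means the cache is really gone. A hypothetical sketch (the ctx_handle attribute and the exact C binding signature are assumptions; get_cache_by_id() used in Example #19 would be analogous):

from ctypes import c_void_p

def get_cache_by_name(ctx, cache_name):
    # 0 if a cache with this name still exists, an OCF error code otherwise
    cache_pointer = c_void_p()
    return OcfLib.getInstance().ocf_mngt_cache_get_by_name(
        ctx.ctx_handle, cache_name.encode("ascii"), cache_pointer)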
Example #13
def test_neg_change_cache_mode(pyocf_ctx, cm, cls):
    """
    Test whether it is possible to change cache mode to an invalid value.
    :param pyocf_ctx: basic pyocf context fixture
    :param cm: cache mode we start with
    :param cls: cache line size we start with
    """
    # Start cache device
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)

    # Change cache mode to invalid one and check if failed
    for i in RandomGenerator(DefaultRanges.UINT32):
        if i in [item.value for item in CacheMode]:
            continue
        with pytest.raises(OcfError, match="Error changing cache mode"):
            cache.change_cache_mode(i)
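The negative tests here and below fuzz invalid values by iterating RandomGenerator(DefaultRanges.*). Neither class is shown in the excerpt; a minimal sketch, assuming Range is a simple (min, max) pair as suggested by Range(1025, 4096) in Example #20, with an arbitrary iteration count:

import random
from collections import namedtuple

Range = namedtuple("Range", ["min", "max"])

class DefaultRanges:
    UINT8 = Range(0, 2**8 - 1)
    UINT32 = Range(0, 2**32 - 1)
    UINT64 = Range(0, 2**64 - 1)

class RandomGenerator:
    # Yields a fixed number of uniformly random integers from the range.
    def __init__(self, range_, count=1000):
        self.range = range_
        self.count = count

    def __iter__(self):
        for _ in range(self.count):
            yield random.randint(self.range.min, self.range.max)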
Example #14
def test_neg_set_acp_param(pyocf_ctx, cm, cls):
    """
    Test whether it is possible to set an invalid param for the ACP cleaning policy
    :param pyocf_ctx: basic pyocf context fixture
    :param cm: cache mode we start with
    :param cls: cache line size we start with
    :return:
    """
    # Start cache device
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)

    # Set an invalid ACP param and check that it fails
    for i in RandomGenerator(DefaultRanges.UINT32):
        if i in [item.value for item in AcpParams]:
            continue
        with pytest.raises(OcfError, match="Error setting cleaning policy param"):
            cache.set_cleaning_policy_param(CleaningPolicy.ACP, i, 1)
Example #15
def test_neg_set_promotion_policy(pyocf_ctx, cm, cls):
    """
    Test whether it is possible to set an invalid promotion policy
    :param pyocf_ctx: basic pyocf context fixture
    :param cm: cache mode we start with
    :param cls: cache line size we start with
    :return:
    """
    # Start cache device
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)

    # Change to invalid promotion policy and check if failed
    for i in RandomGenerator(DefaultRanges.UINT32):
        if i in [item.value for item in PromotionPolicy]:
            continue
        with pytest.raises(OcfError, match="Error setting promotion policy"):
            cache.set_promotion_policy(i)
Example #16
def test_neg_attach_cls(pyocf_ctx, cm, cls):
    """
    Test whether it is possible to change cache line size to an invalid
    value while attaching the cache device
    :param pyocf_ctx: basic pyocf context fixture
    :param cm: cache mode we start with
    :param cls: cache line size we start with
    :return:
    """
    # Start cache device
    cache_device = Volume(S.from_MiB(30))
    cache = Cache(owner=cache_device.owner, cache_mode=cm, cache_line_size=cls)
    cache.start_cache()

    # Check whether it is possible to attach cache device with invalid cache line size
    for i in RandomGenerator(DefaultRanges.UINT64):
        if i in [item.value for item in CacheLineSize]:
            continue
        with pytest.raises(OcfError, match="Attaching cache device failed"):
            cache.attach_device(cache_device, cache_line_size=i)
Example #17
def test_neg_set_cleaning_policy(pyocf_ctx, cm, cls):
    """
    Test whether it is possible to change cleaning policy to an invalid value
    :param pyocf_ctx: basic pyocf context fixture
    :param cm: cache mode we start with
    :param cls: cache line size we start with
    :return:
    """
    # Start cache device
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(
        cache_device, cache_mode=cm, cache_line_size=cls
    )

    # Set cleaning policy to invalid one and check if failed
    for i in RandomGenerator(DefaultRanges.UINT32):
        if i in [item.value for item in CleaningPolicy]:
            continue
        with pytest.raises(OcfError, match="Error changing cleaning policy"):
            cache.set_cleaning_policy(i)
Example #18
def test_change_cleaning_policy(pyocf_ctx, cm, cls):
    # Start cache device
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(
        cache_device, cache_mode=cm, cache_line_size=cls
    )

    # Check all possible cleaning policy switches
    for cp_from in CleaningPolicy:
        for cp_to in CleaningPolicy:
            cache.set_cleaning_policy(cp_from.value)

            # Check if cleaning policy is correct
            stats = cache.get_stats()
            assert stats["conf"]["cleaning_policy"] == cp_from.value

            cache.set_cleaning_policy(cp_to.value)

            # Check if cleaning policy is correct
            stats = cache.get_stats()
            assert stats["conf"]["cleaning_policy"] == cp_to.value
Example #19
def test_100_start_stop(pyocf_ctx):
    """Starting/stopping stress test.
    Check OCF behaviour when cache is started and stopped continuously
    """

    for i in range(1, 101):
        cache_device = Volume(Size.from_MiB(20))
        cache_mode = CacheMode(randrange(0, len(CacheMode)))
        size = 4096 * 2**randrange(0, len(CacheLineSize))
        cache_line_size = CacheLineSize(size)

        cache = Cache.start_on_device(
            cache_device,
            cache_mode=cache_mode,
            cache_line_size=cache_line_size)
        stats = cache.get_stats()
        assert stats["conf"]["cache_mode"] == cache_mode, "Cache mode"
        assert stats["conf"]["cache_line_size"] == cache_line_size, "Cache line size"
        assert stats["conf"]["cache_id"] == 1, "Cache id"
        cache.stop()
        assert get_cache_by_id(pyocf_ctx, 1) != 0, "Try getting cache after stopping it"
Example #20
def test_neg_set_ioclass_name_len(pyocf_ctx):
    """
    Test whether it is possible to add an ioclass with a too-long name
    :param pyocf_ctx: basic pyocf context fixture
    :return:
    """

    # Start cache device
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device,
                                  cache_mode=CacheMode.WT,
                                  cache_line_size=CacheLineSize.LINE_4KiB)

    # Set invalid name and check if failed
    for name in RandomStringGenerator(len_range=Range(1025, 4096),
                                      count=10000):
        with pytest.raises(OcfError, match="Error adding partition to cache"):
            cache.configure_partition(part_id=1,
                                      name=name,
                                      max_size=100,
                                      priority=1)
            print(f"\n{name}")
Example #21
def test_neg_set_nhit_promotion_policy_param(pyocf_ctx, cm, cls):
    """
    Test whether it is possible to set an invalid promotion policy param id for the NHIT promotion policy
    :param pyocf_ctx: basic pyocf context fixture
    :param cm: cache mode we start with
    :param cls: cache line size we start with
    :return:
    """
    # Start cache device
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(
        cache_device,
        cache_mode=cm,
        cache_line_size=cls,
        promotion_policy=PromotionPolicy.NHIT,
    )

    # Set invalid promotion policy param id and check if failed
    for i in RandomGenerator(DefaultRanges.UINT8):
        if i in [item.value for item in NhitParams]:
            continue
        with pytest.raises(OcfError, match="Error setting promotion policy parameter"):
            cache.set_promotion_policy_param(PromotionPolicy.NHIT, i, 1)
Example #22
def test_change_to_nhit_and_back_io_in_flight(pyocf_ctx):
    """
    Try switching promotion policy during IO; no IOs should return with an error

    1. Create core/cache pair with promotion policy ALWAYS
    2. Issue IOs without waiting for completion
    3. Change promotion policy to NHIT
    4. Wait for IO completions
        * no IOs should fail
    5. Issue IOs without waiting for completion
    6. Change promotion policy to ALWAYS
    7. Wait for IO completions
        * no IOs should fail
    """

    # Step 1
    cache_device = Volume(Size.from_MiB(30))
    core_device = Volume(Size.from_MiB(30))

    cache = Cache.start_on_device(cache_device)
    core = Core.using_device(core_device)

    cache.add_core(core)

    # Step 2
    completions = []
    for i in range(2000):
        comp = OcfCompletion([("error", c_int)])
        write_data = Data(4096)
        io = core.new_io(cache.get_default_queue(), i * 4096, write_data.size,
                         IoDir.WRITE, 0, 0)
        completions += [comp]
        io.set_data(write_data)
        io.callback = comp.callback
        io.submit()

    # Step 3
    cache.set_promotion_policy(PromotionPolicy.NHIT)

    # Step 4
    for c in completions:
        c.wait()
        assert not c.results[
            "error"], "No IOs should fail when turning NHIT policy on"

    # Step 5
    completions = []
    for i in range(2000):
        comp = OcfCompletion([("error", c_int)])
        write_data = Data(4096)
        io = core.new_io(cache.get_default_queue(), i * 4096, write_data.size,
                         IoDir.WRITE, 0, 0)
        completions += [comp]
        io.set_data(write_data)
        io.callback = comp.callback
        io.submit()

    # Step 6
    cache.set_promotion_policy(PromotionPolicy.ALWAYS)

    # Step 7
    for c in completions:
        c.wait()
        assert not c.results[
            "error"], "No IOs should fail when turning NHIT policy off"
Example #23
def test_promoted_after_hits_various_thresholds(pyocf_ctx, insertion_threshold,
                                                fill_percentage):
    """
    Check promotion policy behavior with various set thresholds

    1. Create core/cache pair with promotion policy NHIT
    2. Set TRIGGER_THRESHOLD/INSERTION_THRESHOLD to predefined values
    3. Fill cache from the beginning until occupancy reaches TRIGGER_THRESHOLD%
    4. Issue INSERTION_THRESHOLD - 1 requests to core line not inserted to cache
        * occupancy should not change
    5. Issue one request to LBA from step 4
        * occupancy should rise by one cache line
    """

    # Step 1
    cache_device = Volume(Size.from_MiB(30))
    core_device = Volume(Size.from_MiB(30))

    cache = Cache.start_on_device(cache_device,
                                  promotion_policy=PromotionPolicy.NHIT)
    core = Core.using_device(core_device)
    cache.add_core(core)

    # Step 2
    cache.set_promotion_policy_param(PromotionPolicy.NHIT,
                                     NhitParams.TRIGGER_THRESHOLD,
                                     fill_percentage)
    cache.set_promotion_policy_param(PromotionPolicy.NHIT,
                                     NhitParams.INSERTION_THRESHOLD,
                                     insertion_threshold)
    # Step 3
    fill_cache(cache, fill_percentage / 100)

    stats = cache.get_stats()
    cache_lines = stats["conf"]["size"]
    assert stats["usage"]["occupancy"]["fraction"] // 10 == fill_percentage * 10
    filled_occupancy = stats["usage"]["occupancy"]["value"]

    # Step 4
    last_core_line = int(core_device.size) - cache_lines.line_size
    completions = []
    for i in range(insertion_threshold - 1):
        comp = OcfCompletion([("error", c_int)])
        write_data = Data(cache_lines.line_size)
        io = core.new_io(
            cache.get_default_queue(),
            last_core_line,
            write_data.size,
            IoDir.WRITE,
            0,
            0,
        )
        completions += [comp]
        io.set_data(write_data)
        io.callback = comp.callback
        io.submit()

    for c in completions:
        c.wait()

    stats = cache.get_stats()
    threshold_reached_occupancy = stats["usage"]["occupancy"]["value"]
    assert threshold_reached_occupancy == filled_occupancy, (
        "No insertion should occur while NHIT is triggered and core line "
        "didn't reach INSERTION_THRESHOLD"
    )

    # Step 5
    comp = OcfCompletion([("error", c_int)])
    write_data = Data(cache_lines.line_size)
    io = core.new_io(cache.get_default_queue(), last_core_line,
                     write_data.size, IoDir.WRITE, 0, 0)
    io.set_data(write_data)
    io.callback = comp.callback
    io.submit()

    comp.wait()

    assert (threshold_reached_occupancy ==
            cache.get_stats()["usage"]["occupancy"]["value"] -
            1), "Previous request should be promoted and occupancy should rise"
Example #24
def test_read_data_consistency(pyocf_ctx, cacheline_size, cache_mode,
                               rand_seed):
    CACHELINE_COUNT = 9
    SECTOR_SIZE = Size.from_sector(1).B
    CLS = cacheline_size // SECTOR_SIZE
    WORKSET_SIZE = CACHELINE_COUNT * cacheline_size
    WORKSET_OFFSET = 128 * cacheline_size
    SECTOR_COUNT = int(WORKSET_SIZE / SECTOR_SIZE)
    ITERATION_COUNT = 50

    random.seed(rand_seed)

    # start sector for each region
    region_start = ([0, 3 * CLS, 4 * CLS - 1] +
                    [4 * CLS + i
                     for i in range(CLS)] + [5 * CLS, 5 * CLS + 1, 6 * CLS])
    num_regions = len(region_start)
    # possible IO start sectors for test iteration
    start_sec = [0, CLS, 2 * CLS, 3 * CLS, 4 * CLS - 2, 4 * CLS - 1
                 ] + [4 * CLS + i for i in range(CLS)]
    # possible IO end sectors for test iteration
    end_sec = ([3 * CLS - 1] + [4 * CLS + i for i in range(CLS)] + [
        5 * CLS, 5 * CLS + 1, 6 * CLS - 1, 7 * CLS - 1, 8 * CLS - 1,
        9 * CLS - 1
    ])

    data = {}
    # memset n-th sector of core data with n << 2
    data[SectorStatus.INVALID] = bytes([
        get_byte(((x // SECTOR_SIZE) << 2) + 0, x % 4)
        for x in range(WORKSET_SIZE)
    ])
    # memset n-th sector of clean data with n << 2 + 1
    data[SectorStatus.CLEAN] = bytes([
        get_byte(((x // SECTOR_SIZE) << 2) + 1, x % 4)
        for x in range(WORKSET_SIZE)
    ])
    # memset n-th sector of dirty data with n << 2 + 2
    data[SectorStatus.DIRTY] = bytes([
        get_byte(((x // SECTOR_SIZE) << 2) + 2, x % 4)
        for x in range(WORKSET_SIZE)
    ])

    result_b = bytes(WORKSET_SIZE)

    cache_device = Volume(Size.from_MiB(30))
    core_device = Volume(Size.from_MiB(30))

    cache = Cache.start_on_device(cache_device,
                                  cache_mode=CacheMode.WO,
                                  cache_line_size=cacheline_size)
    core = Core.using_device(core_device)

    cache.add_core(core)

    insert_order = list(range(CACHELINE_COUNT))

    # set fixed generated sector statuses
    region_statuses = [
        [I, I, I] + [I for i in range(CLS)] + [I, I, I],
        [I, I, I] + [D for i in range(CLS)] + [I, I, I],
        [I, I, I] + [C for i in range(CLS)] + [I, I, I],
        [I, I, I] + [D for i in range(CLS // 2 - 1)] + [I] +
        [D for i in range(CLS // 2)] + [I, I, I],
        [I, I, I] + [D for i in range(CLS // 2 - 1)] + [I, I] +
        [D for i in range(CLS // 2 - 1)] + [I, I, I],
        [I, I, I] + [D for i in range(CLS // 2 - 2)] + [I, I, D, C] +
        [D for i in range(CLS // 2 - 2)] + [I, I, I],
        [I, I, D] + [D for i in range(CLS)] + [D, I, I],
        [I, I, D] + [D for i in range(CLS // 2 - 1)] + [I] +
        [D for i in range(CLS // 2)] + [D, I, I],
    ]

    # add randomly generated sector statuses
    for _ in range(ITERATION_COUNT - len(region_statuses)):
        region_statuses.append(
            [random.choice(list(SectorStatus)) for _ in range(num_regions)])

    # iterate over generated status combinations and perform the test
    for region_state in region_statuses:
        # write data to core and invalidate all CL and write data pattern to core
        cache.change_cache_mode(cache_mode=CacheMode.PT)
        io_to_exp_obj(
            core,
            WORKSET_OFFSET,
            len(data[SectorStatus.INVALID]),
            data[SectorStatus.INVALID],
            0,
            IoDir.WRITE,
        )

        # randomize cacheline insertion order to exercise different
        # paths with regard to cache I/O physical addresses continuousness
        random.shuffle(insert_order)
        sectors = [
            insert_order[i // CLS] * CLS + (i % CLS)
            for i in range(SECTOR_COUNT)
        ]

        # insert clean sectors - iterate over cachelines in @insert_order order
        cache.change_cache_mode(cache_mode=CacheMode.WT)
        for sec in sectors:
            region = sector_to_region(sec, region_start)
            if region_state[region] != SectorStatus.INVALID:
                io_to_exp_obj(
                    core,
                    WORKSET_OFFSET + SECTOR_SIZE * sec,
                    SECTOR_SIZE,
                    data[SectorStatus.CLEAN],
                    sec * SECTOR_SIZE,
                    IoDir.WRITE,
                )

        # write dirty sectors
        cache.change_cache_mode(cache_mode=CacheMode.WB)
        for sec in sectors:
            region = sector_to_region(sec, region_start)
            if region_state[region] == SectorStatus.DIRTY:
                io_to_exp_obj(
                    core,
                    WORKSET_OFFSET + SECTOR_SIZE * sec,
                    SECTOR_SIZE,
                    data[SectorStatus.DIRTY],
                    sec * SECTOR_SIZE,
                    IoDir.WRITE,
                )

        cache.change_cache_mode(cache_mode=cache_mode)

        core_device.reset_stats()

        # get up to 32 randomly selected pairs of (start,end) sectors
        # 32 is enough to cover all combinations for 4K and 8K cacheline size
        io_ranges = [(s, e) for s, e in product(start_sec, end_sec) if s < e]
        random.shuffle(io_ranges)
        io_ranges = io_ranges[:32]

        # run the test for each selected IO range for currently set up region status
        for start, end in io_ranges:
            print_test_case(region_start, region_state, start, end,
                            SECTOR_COUNT, CLS)

            # issue read
            START = start * SECTOR_SIZE
            END = end * SECTOR_SIZE
            size = (end - start + 1) * SECTOR_SIZE
            assert 0 == io_to_exp_obj(
                core, WORKSET_OFFSET + START, size, result_b, START, IoDir.READ
            ), "error reading in {}: region_state={}, start={}, end={}, insert_order={}".format(
                cache_mode, region_state, start, end, insert_order)

            # verify read data
            for sec in range(start, end + 1):
                # just check the first 32 bits of the sector (this is the size of the fill pattern)
                region = sector_to_region(sec, region_start)
                start_byte = sec * SECTOR_SIZE
                expected_data = bytes_to_uint32(
                    data[region_state[region]][start_byte + 0],
                    data[region_state[region]][start_byte + 1],
                    data[region_state[region]][start_byte + 2],
                    data[region_state[region]][start_byte + 3],
                )
                actual_data = bytes_to_uint32(
                    result_b[start_byte + 0],
                    result_b[start_byte + 1],
                    result_b[start_byte + 2],
                    result_b[start_byte + 3],
                )

                assert (
                    actual_data == expected_data
                ), "unexpected data in sector {}, region_state={}, start={}, end={}, insert_order={}\n".format(
                    sec, region_state, start, end, insert_order)

            if cache_mode == CacheMode.WO:
                # WO is not supposed to clean dirty data
                assert (
                    core_device.get_stats()[IoDir.WRITE] == 0
                ), "unexpected write to core device, region_state={}, start={}, end={}, insert_order = {}\n".format(
                    region_state, start, end, insert_order)
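Example #24 stamps every sector with a 32-bit pattern via get_byte() and checks it with bytes_to_uint32(); the I/D/C shorthand in the status tables also comes from elsewhere in the original test. Minimal little-endian implementations consistent with the usage above:

# shorthand used in the region status tables
I = SectorStatus.INVALID
D = SectorStatus.DIRTY
C = SectorStatus.CLEAN

def get_byte(number, byte):
    # n-th little-endian byte of a 32-bit pattern
    return (number >> (byte * 8)) & 0xFF

def bytes_to_uint32(byte0, byte1, byte2, byte3):
    # reassemble four bytes into the original 32-bit value
    return byte0 | (byte1 << 8) | (byte2 << 16) | (byte3 << 24)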
Example #25
def test_seq_cutoff_max_streams(pyocf_ctx):
    """
    Test number of sequential streams tracked by OCF.

    MAX_STREAMS is the maximum number of streams OCF is able to track.

    1. Issue MAX_STREAMS requests (writes or reads) to cache, 1 sector shorter than
        seq cutoff threshold
    2. Issue MAX_STREAMS-1 requests continuing the streams from 1. to surpass the threshold and
        check if cutoff was triggered (requests used PT engine)
    3. Issue a single request to a stream not used in 1. or 2. and check if it has been
        handled by cache
    4. Issue a single request to the stream least recently used in 1. and 2. and check if it
        has been handled by cache. It should no longer be tracked by OCF, because the request
        in step 3 overflowed the OCF tracking structure.
    """
    MAX_STREAMS = 256
    TEST_STREAMS = MAX_STREAMS + 1  # Number of streams used by test - one more than OCF can track
    core_size = Size.from_MiB(200)
    threshold = Size.from_KiB(4)

    streams = [
        Stream(
            last=Size((stream_no * int(core_size) // TEST_STREAMS),
                      sector_aligned=True),
            length=Size(0),
            direction=choice(list(IoDir)),
        ) for stream_no in range(TEST_STREAMS)
    ]  # Generate MAX_STREAMS + 1 non-overlapping streams

    # Remove one stream - this is the one we are going to use to overflow OCF tracking structure
    # in step 3
    non_active_stream = choice(streams)
    streams.remove(non_active_stream)

    cache = Cache.start_on_device(Volume(Size.from_MiB(200)),
                                  cache_mode=CacheMode.WT)
    core = Core.using_device(Volume(core_size))

    cache.add_core(core)

    cache.set_seq_cut_off_policy(SeqCutOffPolicy.ALWAYS)
    cache.set_seq_cut_off_threshold(threshold)

    # STEP 1
    shuffle(streams)
    io_size = threshold - Size.from_sector(1)
    io_to_streams(core, streams, io_size)

    stats = cache.get_stats()
    assert (stats["req"]["serviced"]["value"] == stats["req"]["total"]["value"]
            == len(streams)), "All request should be serviced - no cutoff"

    old_serviced = len(streams)

    # STEP 2
    lru_stream = streams[0]
    streams.remove(lru_stream)

    shuffle(streams)
    io_to_streams(core, streams, Size.from_sector(1))

    stats = cache.get_stats()
    assert (
        stats["req"]["serviced"]["value"] == old_serviced
    ), "Serviced requests stat should not increase - cutoff engaged for all"
    assert stats["req"]["wr_pt"]["value"] + stats["req"]["rd_pt"][
        "value"] == len(
            streams
        ), "All streams should be handled in PT - cutoff engaged for all streams"

    # STEP 3
    io_to_streams(core, [non_active_stream], Size.from_sector(1))

    stats = cache.get_stats()
    assert (
        stats["req"]["serviced"]["value"] == old_serviced + 1
    ), "This request should be serviced by cache - no cutoff for inactive stream"

    # STEP 4
    io_to_streams(core, [lru_stream], Size.from_sector(1))

    stats = cache.get_stats()
    assert (
        stats["req"]["serviced"]["value"] == old_serviced + 2
    ), "This request should be serviced by cache - lru_stream should be no longer tracked"
Example #26
def test_wo_read_data_consistency(pyocf_ctx):
    # start sector for each region
    region_start = [0, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
    # possible start sectors for test iteration
    start_sec = [0, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
    # possible end sectors for test iteration
    end_sec = [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 23]

    CACHELINE_COUNT = 3
    CACHELINE_SIZE = 4096
    SECTOR_SIZE = Size.from_sector(1).B
    CLS = CACHELINE_SIZE // SECTOR_SIZE
    WORKSET_SIZE = CACHELINE_COUNT * CACHELINE_SIZE
    WORKSET_OFFSET = 1024 * CACHELINE_SIZE
    SECTOR_COUNT = int(WORKSET_SIZE / SECTOR_SIZE)
    ITERATION_COUNT = 200

    # fixed test cases
    fixed_combinations = [
        [I, I, D, D, D, D, D, D, D, D, I, I],
        [I, I, C, C, C, C, C, C, C, C, I, I],
        [I, I, D, D, D, I, D, D, D, D, I, I],
        [I, I, D, D, D, I, I, D, D, D, I, I],
        [I, I, I, I, D, I, I, D, C, D, I, I],
        [I, D, D, D, D, D, D, D, D, D, D, I],
        [C, C, I, D, D, I, D, D, D, D, D, I],
        [D, D, D, D, D, D, D, D, D, D, D, I],
    ]

    data = {}
    # memset n-th sector of core data with n
    data[SectorStatus.INVALID] = bytes(
        [x // SECTOR_SIZE for x in range(WORKSET_SIZE)])
    # memset n-th sector of clean data with n + 100
    data[SectorStatus.CLEAN] = bytes(
        [100 + x // SECTOR_SIZE for x in range(WORKSET_SIZE)])
    # memset n-th sector of dirty data with n + 200
    data[SectorStatus.DIRTY] = bytes(
        [200 + x // SECTOR_SIZE for x in range(WORKSET_SIZE)])

    result_b = bytes(WORKSET_SIZE)

    cache_device = Volume(Size.from_MiB(30))
    core_device = Volume(Size.from_MiB(30))

    cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WO)
    core = Core.using_device(core_device)

    cache.add_core(core)

    insert_order = [x for x in range(CACHELINE_COUNT)]

    # generate region status combinations and shuffle them
    combinations = []
    state_combinations = product(SectorStatus, repeat=len(region_start))
    for S in state_combinations:
        combinations.append(S)
    random.shuffle(combinations)

    # add fixed test cases at the beginning
    combinations = fixed_combinations + combinations

    for S in combinations[:ITERATION_COUNT]:
        # write data to core and invalidate all CL
        cache.change_cache_mode(cache_mode=CacheMode.PT)
        io_to_exp_obj(core, WORKSET_OFFSET, len(data[SectorStatus.INVALID]),
                      data[SectorStatus.INVALID], 0, IoDir.WRITE)

        # randomize cacheline insertion order to exercise different
        # paths with regard to cache I/O physical addresses continuousness
        random.shuffle(insert_order)
        sectors = [
            insert_order[i // CLS] * CLS + (i % CLS)
            for i in range(SECTOR_COUNT)
        ]

        # insert clean sectors - iterate over cachelines in @insert_order order
        cache.change_cache_mode(cache_mode=CacheMode.WT)
        for sec in sectors:
            region = sector_to_region(sec, region_start)
            if S[region] != SectorStatus.INVALID:
                io_to_exp_obj(core, WORKSET_OFFSET + SECTOR_SIZE * sec,
                              SECTOR_SIZE, data[SectorStatus.CLEAN],
                              sec * SECTOR_SIZE, IoDir.WRITE)

        # write dirty sectors
        cache.change_cache_mode(cache_mode=CacheMode.WO)
        for sec in sectors:
            region = sector_to_region(sec, region_start)
            if S[region] == SectorStatus.DIRTY:
                io_to_exp_obj(core, WORKSET_OFFSET + SECTOR_SIZE * sec,
                              SECTOR_SIZE, data[SectorStatus.DIRTY],
                              sec * SECTOR_SIZE, IoDir.WRITE)

        core_device.reset_stats()

        for s in start_sec:
            for e in end_sec:
                if s > e:
                    continue

                # issue WO read
                START = s * SECTOR_SIZE
                END = e * SECTOR_SIZE
                size = (e - s + 1) * SECTOR_SIZE
                assert 0 == io_to_exp_obj(
                    core, WORKSET_OFFSET + START, size, result_b, START,
                    IoDir.READ
                ), "error reading in WO mode: S={}, start={}, end={}, insert_order={}".format(
                    S, s, e, insert_order)

                # verify read data
                for sec in range(s, e + 1):
                    # just check the first byte of sector
                    region = sector_to_region(sec, region_start)
                    check_byte = sec * SECTOR_SIZE
                    assert (
                        result_b[check_byte] == data[S[region]][check_byte]
                    ), "unexpected data in sector {}, S={}, s={}, e={}, insert_order={}\n".format(
                        sec, S, s, e, insert_order)

                # WO is not supposed to clean dirty data
                assert (
                    core_device.get_stats()[IoDir.WRITE] == 0
                ), "unexpected write to core device, S={}, s={}, e={}, insert_order = {}\n".format(
                    S, s, e, insert_order)
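Both WO consistency tests rely on sector_to_region() and io_to_exp_obj(), neither defined in the excerpt. sector_to_region() maps a sector to the region containing it; io_to_exp_obj() issues one IO against the exported object and returns the completion error code. A sketch under those assumptions (Data.from_bytes and the memmove read-back into the caller's preallocated buffer mirror how result_b is used above, but are assumptions):

from ctypes import memmove, cast, c_void_p

def sector_to_region(sec, region_start):
    # index of the last region whose start sector is <= sec
    i = len(region_start) - 1
    while region_start[i] > sec:
        i -= 1
    return i

def io_to_exp_obj(core, address, size, data, offset, io_dir):
    io = core.new_io(core.cache.get_default_queue(), address, size, io_dir, 0, 0)
    if io_dir == IoDir.WRITE:
        io_data = Data.from_bytes(bytes(data[offset:offset + size]))
    else:
        io_data = Data(size)
    io.set_data(io_data)
    completion = OcfCompletion([("err", c_int)])
    io.callback = completion.callback
    io.submit()
    completion.wait()
    if io_dir == IoDir.READ and completion.results["err"] == 0:
        # test-only trick: copy the read data back into the caller's buffer
        memmove(cast(data, c_void_p).value + offset, io_data.handle, size)
    return completion.results["err"]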
Example #27
def test_secure_erase_simple_io_read_misses(cache_mode):
    """
        Perform simple IO which will trigger read misses, which in turn should
        trigger backfill. Track all the data locked/copied for backfill and make
        sure OCF calls secure erase and unlock on them.
    """
    ctx = OcfCtx(
        OcfLib.getInstance(),
        b"Security tests ctx",
        DefaultLogger(LogLevel.WARN),
        DataCopyTracer,
        Cleaner,
    )

    ctx.register_volume_type(Volume)

    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device, cache_mode=cache_mode)

    core_device = Volume(S.from_MiB(50))
    core = Core.using_device(core_device)
    cache.add_core(core)

    write_data = DataCopyTracer(S.from_sector(1))
    io = core.new_io(
        cache.get_default_queue(),
        S.from_sector(1).B,
        write_data.size,
        IoDir.WRITE,
        0,
        0,
    )
    io.set_data(write_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    cmpls = []
    for i in range(100):
        read_data = DataCopyTracer(S.from_sector(1))
        io = core.new_io(
            cache.get_default_queue(),
            i * S.from_sector(1).B,
            read_data.size,
            IoDir.READ,
            0,
            0,
        )
        io.set_data(read_data)

        cmpl = OcfCompletion([("err", c_int)])
        io.callback = cmpl.callback
        cmpls.append(cmpl)
        io.submit()

    for c in cmpls:
        c.wait()

    write_data = DataCopyTracer.from_string("TEST DATA" * 100)
    io = core.new_io(cache.get_default_queue(), S.from_sector(1).B,
                     write_data.size, IoDir.WRITE, 0, 0)
    io.set_data(write_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    stats = cache.get_stats()

    ctx.exit()

    assert (len(DataCopyTracer.needs_erase) == 0
            ), "Not all locked Data instances were secure erased!"
    assert (len(DataCopyTracer.locked_instances) == 0
            ), "Not all locked Data instances were unlocked!"
    assert (stats["req"]["rd_partial_misses"]["value"] +
            stats["req"]["rd_full_misses"]["value"]) > 0
Example #28
def test_evict_overflown_pinned(pyocf_ctx, cls: CacheLineSize):
    """ Verify if overflown pinned ioclass is evicted """
    cache_device = Volume(Size.from_MiB(35))
    core_device = Volume(Size.from_MiB(100))
    cache = Cache.start_on_device(cache_device,
                                  cache_mode=CacheMode.WT,
                                  cache_line_size=cls)
    core = Core.using_device(core_device)
    cache.add_core(core)

    test_ioclass_id = 1
    pinned_ioclass_id = 2
    pinned_ioclass_max_occupancy = 10

    cache.configure_partition(
        part_id=test_ioclass_id,
        name="default_ioclass",
        max_size=100,
        priority=1,
    )
    cache.configure_partition(
        part_id=pinned_ioclass_id,
        name="pinned_ioclass",
        max_size=pinned_ioclass_max_occupancy,
        priority=-1,
    )

    cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)

    cache_size = cache.get_stats()["conf"]["size"]

    data = Data(4096)

    # Populate cache with data
    for i in range(cache_size.blocks_4k):
        send_io(core, data, i * 4096, test_ioclass_id)

    part_current_size = CacheLines(
        cache.get_partition_info(part_id=test_ioclass_id)["_curr_size"], cls)
    assert isclose(
        part_current_size.blocks_4k,
        cache_size.blocks_4k,
        abs_tol=Size(
            cls).blocks_4k), "Failed to populate the default partition"

    # Repart - force overflow of second partition occupancy limit
    pinned_double_size = ceil(
        (cache_size.blocks_4k * pinned_ioclass_max_occupancy * 2) / 100)
    for i in range(pinned_double_size):
        send_io(core, data, i * 4096, pinned_ioclass_id)

    part_current_size = CacheLines(
        cache.get_partition_info(part_id=pinned_ioclass_id)["_curr_size"], cls)
    assert isclose(
        part_current_size.blocks_4k,
        pinned_double_size,
        abs_tol=Size(cls).blocks_4k
    ), "Occupancy of pinned ioclass doesn't match expected value"

    # Trigger IO to the default ioclass - force eviction from the overflown ioclass
    for i in range(cache_size.blocks_4k):
        send_io(core, data, (cache_size.blocks_4k + i) * 4096, test_ioclass_id)

    part_current_size = CacheLines(
        cache.get_partition_info(part_id=pinned_ioclass_id)["_curr_size"], cls)
    assert isclose(
        part_current_size.blocks_4k,
        ceil(cache_size.blocks_4k * 0.1),
        abs_tol=Size(cls).blocks_4k,
    ), "Overflown part has not been evicted"
Example #29
def try_start_cache(**config):
    cache_device = Volume(Size.from_MiB(30))
    cache = Cache.start_on_device(cache_device, **config)
    cache.stop()
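try_start_cache() is a helper rather than a test: it starts a cache with the given keyword configuration and stops it right away, so a parametrized caller can assert on the start itself. A hypothetical usage:

# a valid config should start and stop cleanly
try_start_cache(cache_mode=CacheMode.WT, cache_line_size=CacheLineSize.LINE_4KiB)

# an invalid config should raise
with pytest.raises(OcfError):
    try_start_cache(cache_line_size=5)  # 5 is not a valid cache line size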
Example #30
def test_load_cache_no_preexisting_data(pyocf_ctx):
    cache_device = Volume(S.from_MiB(30))

    with pytest.raises(OcfError, match="OCF_ERR_NO_METADATA"):
        cache = Cache.load_from_device(cache_device)