Example #1
def test_eviction_two_cores(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
    """Test if eviction works correctly when remapping cachelines between distinct cores."""
    cache_device = RamVolume(Size.from_MiB(50))

    core_device1 = RamVolume(Size.from_MiB(40))
    core_device2 = RamVolume(Size.from_MiB(40))
    cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
    cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)
    cache_size = cache.get_stats()["conf"]["size"]
    core1 = Core.using_device(core_device1, name="core1")
    core2 = Core.using_device(core_device2, name="core2")
    cache.add_core(core1)
    vol1 = CoreVolume(core1, open=True)
    cache.add_core(core2)
    vol2 = CoreVolume(core2, open=True)

    valid_io_size = Size.from_B(cache_size.B)
    test_data = Data(valid_io_size)
    send_io(core1, test_data)
    send_io(core2, test_data)

    stats1 = core1.get_stats()
    stats2 = core2.get_stats()
    # IO to the second core should evict all the data from the first core
    assert stats1["usage"]["occupancy"]["value"] == 0
    assert stats2["usage"]["occupancy"]["value"] == valid_io_size.blocks_4k
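
The send_io helper used here (and in Examples #8 and #23) is not part of this excerpt. Below is a minimal sketch of it, assuming Core exposes get_front_volume() and that OcfCompletion exposes the completion status via .results (both assumptions), following the same Io/OcfCompletion pattern as the other examples:

def send_io(core: Core, data: Data, addr: int = 0, target_ioclass: int = 0):
    # Hypothetical helper: submit one write to the core's exported object and wait.
    vol = core.get_front_volume()          # assumes the CoreVolume was opened earlier
    queue = core.cache.get_default_queue()

    io = vol.new_io(queue, addr, data.size, IoDir.WRITE, target_ioclass, 0)
    io.set_data(data)

    completion = OcfCompletion([("err", c_int)])
    io.callback = completion.callback
    io.submit()
    completion.wait()

    assert completion.results["err"] == 0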
Example #2
def test_10add_remove_with_io(pyocf_ctx):
    # Start cache device
    cache_device = RamVolume(S.from_MiB(50))
    cache = Cache.start_on_device(cache_device)

    # Create core device
    core_device = RamVolume(S.from_MiB(10))
    core = Core.using_device(core_device)

    # Add and remove core 10 times in a loop with io in between
    for i in range(0, 10):
        cache.add_core(core)
        vol = CoreVolume(core, open=True)
        stats = cache.get_stats()
        assert stats["conf"]["core_count"] == 1

        write_data = Data.from_string("Test data")
        io = vol.new_io(cache.get_default_queue(),
                        S.from_sector(1).B, write_data.size, IoDir.WRITE, 0, 0)
        io.set_data(write_data)

        cmpl = OcfCompletion([("err", c_int)])
        io.callback = cmpl.callback
        io.submit()
        cmpl.wait()

        cache.remove_core(core)
        stats = cache.get_stats()
        assert stats["conf"]["core_count"] == 0
Example #3
def test_secure_erase_simple_io_cleaning():
    """
        Perform simple IO which will trigger WB cleaning. Track all the data used by
        the cleaner (locked) and make sure it is erased and unlocked after use.

        1. Start cache in WB mode
        2. Write single sector at LBA 0
        3. Read whole cache line at LBA 0
        4. Assert that 3. triggered cleaning
        5. Check if all locked Data copies were erased and unlocked
    """
    ctx = OcfCtx(
        OcfLib.getInstance(),
        b"Security tests ctx",
        DefaultLogger(LogLevel.WARN),
        DataCopyTracer,
        Cleaner,
    )

    ctx.register_volume_type(RamVolume)

    cache_device = RamVolume(S.from_MiB(50))
    cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WB)

    core_device = RamVolume(S.from_MiB(100))
    core = Core.using_device(core_device)
    cache.add_core(core)
    vol = CoreVolume(core, open=True)
    queue = cache.get_default_queue()

    write_data = Data(S.from_sector(1).B)
    io = vol.new_io(queue,
                    S.from_sector(1).B, write_data.size, IoDir.WRITE, 0, 0)
    io.set_data(write_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    read_data = Data(S.from_sector(8).B)
    io = vol.new_io(queue,
                    S.from_sector(1).B, read_data.size, IoDir.READ, 0, 0)
    io.set_data(read_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    stats = cache.get_stats()

    ctx.exit()

    assert (len(DataCopyTracer.needs_erase) == 0
            ), "Not all locked Data instances were secure erased!"
    assert (len(DataCopyTracer.locked_instances) == 0
            ), "Not all locked Data instances were unlocked!"
    assert (stats["usage"]["clean"]["value"]) > 0, "Cleaner didn't run!"
def test_surprise_shutdown_stop_cache(pyocf_ctx):
    core_device = RamVolume(S.from_MiB(10))
    error_triggered = True
    error_io_seq_no = 0
    io_offset = mngmt_op_surprise_shutdown_test_io_offset

    while error_triggered:
        # Start cache device without error injection
        error_io = {IoDir.WRITE: error_io_seq_no}
        device = ErrorDevice(
            mngmt_op_surprise_shutdown_test_cache_size, error_seq_no=error_io, armed=False
        )

        # setup cache and insert some data
        cache = Cache.start_on_device(device, cache_mode=CacheMode.WB)
        core = Core(device=core_device)
        cache.add_core(core)
        vol = CoreVolume(core, open=True)
        ocf_write(vol, cache.get_default_queue(), 0xAA, io_offset)

        # start error injection
        device.arm()

        try:
            cache.stop()
            status = OcfErrorCode.OCF_OK
        except OcfError as ex:
            status = ex.error_code

        # if an error was injected we expect a management op error
        error_triggered = device.error_triggered()
        if error_triggered:
            assert status == OcfErrorCode.OCF_ERR_WRITE_CACHE
        else:
            assert status == OcfErrorCode.OCF_OK
            break

        # disable error injection and load the cache
        device.disarm()
        cache = None

        assert core_device.get_bytes()[io_offset] == VOLUME_POISON

        cache = Cache.load_from_device(device, open_cores=False)
        stats = cache.get_stats()
        if stats["conf"]["core_count"] == 1:
            assert stats["usage"]["occupancy"]["value"] == 1
            core = Core(device=core_device)
            cache.add_core(core, try_add=True)
            vol = CoreVolume(core, open=True)
            assert ocf_read(vol, cache.get_default_queue(), io_offset) == 0xAA

        cache.stop()

        # advance error injection point
        error_io_seq_no += 1
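
The ocf_write / ocf_read helpers used above and by the following fragments are not included in this excerpt. A minimal sketch, assuming 512-byte accesses and that Data.from_bytes and Data.get_bytes are available (assumptions):

def ocf_write(vol, queue, val, offset):
    # Hypothetical helper: write one 512-byte block filled with `val` at `offset`.
    data = Data.from_bytes(bytes([val] * 512))
    io = vol.new_io(queue, offset, 512, IoDir.WRITE, 0, 0)
    io.set_data(data)

    completion = OcfCompletion([("err", c_int)])
    io.callback = completion.callback
    io.submit()
    completion.wait()


def ocf_read(vol, queue, offset):
    # Hypothetical helper: read one 512-byte block at `offset` and return its first byte.
    data = Data(512)
    io = vol.new_io(queue, offset, 512, IoDir.READ, 0, 0)
    io.set_data(data)

    completion = OcfCompletion([("err", c_int)])
    io.callback = completion.callback
    io.submit()
    completion.wait()

    return data.get_bytes()[0]   # assumes Data exposes get_bytes()
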
def prepare(cache):
    # Nested helper (fragment): core1 comes from the enclosing test's scope.
    cache.add_core(core1)
    vol = CoreVolume(core1, open=True)
    cache.save()
    ocf_write(vol, cache.get_default_queue(), 0xAA, mngmt_op_surprise_shutdown_test_io_offset)
    cache.remove_core(core1)
    cache.save()


def check_func(cache, error_triggered):
    # Nested helper (fragment): core_device and io_offset come from the enclosing test's scope.
    stats = cache.get_stats()
    if stats["conf"]["core_count"] == 0:
        assert core_device.get_bytes()[io_offset] == 0xAA
    else:
        core = cache.get_core_by_name("core1")
        vol = CoreVolume(core, open=True)
        assert ocf_read(vol, cache.get_default_queue(), io_offset) == 0xAA
Example #7
def test_io_flags(pyocf_ctx, cache_mode):
    """
    Verify that I/O flags provided at the top volume interface
    are propagated down to bottom volumes for all associated
    I/Os (including metadata writes to cache volume).
    """

    flags = 0x239482
    block_size = 4096

    data = bytes(block_size)

    cache_device = FlagsValVolume(Size.from_MiB(50), flags)
    core_device = FlagsValVolume(Size.from_MiB(50), flags)

    cache = Cache.start_on_device(cache_device, cache_mode=cache_mode)
    core = Core.using_device(core_device)

    cache.add_core(core)
    vol = CoreVolume(core, open=True)

    cache_device.set_check(True)
    core_device.set_check(True)

    # write miss
    io_to_exp_obj(core, block_size * 0, block_size, data, 0, IoDir.WRITE,
                  flags)
    assert not cache_device.fail
    assert not core_device.fail

    # read miss
    io_to_exp_obj(core, block_size * 1, block_size, data, 0, IoDir.READ, flags)
    assert not cache_device.fail
    assert not core_device.fail

    # "dirty" read hit
    io_to_exp_obj(core, block_size * 0, block_size, data, 0, IoDir.READ, flags)
    assert not cache_device.fail
    assert not core_device.fail

    # "clean" read hit
    io_to_exp_obj(core, block_size * 1, block_size, data, 0, IoDir.READ, flags)
    assert not cache_device.fail
    assert not core_device.fail

    # "dirty" write hit
    io_to_exp_obj(core, block_size * 0, block_size, data, 0, IoDir.WRITE,
                  flags)
    assert not cache_device.fail
    assert not core_device.fail

    # "clean" write hit
    io_to_exp_obj(core, block_size * 1, block_size, data, 0, IoDir.WRITE,
                  flags)
    assert not cache_device.fail
    assert not core_device.fail
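
FlagsValVolume is not defined in this excerpt. A plausible sketch of such a checking volume, assuming RamVolume exposes a submit_io hook and that the request flags are reachable via io.contents._flags (both assumptions):

class FlagsValVolume(RamVolume):
    def __init__(self, size, flags):
        self.flags = flags
        self.check = False
        self.fail = False
        super().__init__(size)

    def set_check(self, check):
        # enable/disable flag verification
        self.check = check

    def submit_io(self, io):
        # record a failure if an I/O arrives without the expected flags
        if self.check and io.contents._flags != self.flags:
            self.fail = True
        super().submit_io(io)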
Example #8
def test_write_size_greater_than_cache(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
    """Test if eviction does not occur when IO greater than cache size is submitted."""
    cache_device = RamVolume(Size.from_MiB(50))

    core_device = RamVolume(Size.from_MiB(200))
    cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
    cache_size = cache.get_stats()["conf"]["size"]
    core = Core.using_device(core_device)
    cache.add_core(core)
    vol = CoreVolume(core, open=True)
    cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)

    valid_io_size = Size.from_B(cache_size.B // 2)
    test_data = Data(valid_io_size)
    send_io(core, test_data)

    stats = core.cache.get_stats()
    first_block_sts = stats["block"]
    first_usage_sts = stats["usage"]
    pt_writes_first = stats["req"]["wr_pt"]
    assert stats["usage"]["occupancy"]["value"] == (
        valid_io_size.B / Size.from_KiB(4).B
    ), "Occupancy after first IO"
    prev_writes_to_core = stats["block"]["core_volume_wr"]["value"]

    # Anything below 200 MiB is a valid size (less than the core device size).
    # Writing more than the cache size (at an offset above the first IO) should in this
    # case go directly to the core and shouldn't trigger eviction.
    io_size_bigger_than_cache = Size.from_MiB(100)
    io_offset = valid_io_size
    test_data = Data(io_size_bigger_than_cache)
    send_io(core, test_data, io_offset)

    if mode is not CacheMode.WT:
        # Flush first write
        cache.flush()
    stats = core.cache.get_stats()
    second_block_sts = stats["block"]
    second_usage_sts = stats["usage"]
    pt_writes_second = stats["req"]["wr_pt"]

    # Second write shouldn't affect cache and should go directly to core.
    # Cache occupancy shouldn't change
    # Second IO should go in PT
    assert first_usage_sts["occupancy"] == second_usage_sts["occupancy"]
    assert pt_writes_first["value"] == 0
    assert pt_writes_second["value"] == 1
    assert second_block_sts["cache_volume_wr"]["value"] == valid_io_size.blocks_4k
    assert (
        second_block_sts["core_volume_wr"]["value"]
        == valid_io_size.blocks_4k + io_size_bigger_than_cache.blocks_4k
    )
Example #9
def test_attach_different_size(pyocf_ctx, new_cache_size, mode: CacheMode,
                               cls: CacheLineSize):
    """Start cache and add partition with limited occupancy. Fill partition with data,
    attach cache with different size and trigger IO. Verify if occupancy thresold is
    respected with both original and new cache device.
    """
    cache_device = RamVolume(Size.from_MiB(100))
    core_device = RamVolume(Size.from_MiB(100))
    cache = Cache.start_on_device(cache_device,
                                  cache_mode=mode,
                                  cache_line_size=cls)
    core = Core.using_device(core_device)
    cache.add_core(core)

    vol = CoreVolume(core, open=True)
    queue = cache.get_default_queue()

    cache.configure_partition(part_id=1,
                              name="test_part",
                              max_size=50,
                              priority=1)

    cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)

    cache_size = cache.get_stats()["conf"]["size"]

    block_size = 4096
    data = bytes(block_size)

    for i in range(cache_size.blocks_4k):
        io_to_exp_obj(vol, queue, block_size * i, block_size, data, 0,
                      IoDir.WRITE, 1, 0)

    part_current_size = CacheLines(
        cache.get_partition_info(part_id=1)["_curr_size"], cls)

    assert part_current_size.blocks_4k == cache_size.blocks_4k * 0.5

    cache.detach_device()
    new_cache_device = RamVolume(Size.from_MiB(new_cache_size))
    cache.attach_device(new_cache_device, force=True)

    cache_size = cache.get_stats()["conf"]["size"]

    for i in range(cache_size.blocks_4k):
        io_to_exp_obj(vol, queue, block_size * i, block_size, data, 0,
                      IoDir.WRITE, 1, 0)

    part_current_size = CacheLines(
        cache.get_partition_info(part_id=1)["_curr_size"], cls)

    assert part_current_size.blocks_4k == cache_size.blocks_4k * 0.5
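
io_to_exp_obj is referenced throughout but not defined in this excerpt. A hedged sketch matching the calls in Examples #9, #10 and #21 (Example #7 appears to use an older core-based variant); the Data.from_bytes(buffer, offset, size) form and the memmove copy-back for reads (memmove/cast/c_void_p from ctypes) are assumptions:

def io_to_exp_obj(vol, queue, address, size, data, offset, direction,
                  target_ioclass=0, flags=0):
    # Submit a single I/O to the exported object and wait for it.
    if direction == IoDir.READ:
        io_data = Data(size)
    else:
        io_data = Data.from_bytes(data, offset, size)

    io = vol.new_io(queue, address, size, direction, target_ioclass, flags)
    io.set_data(io_data)

    completion = OcfCompletion([("err", c_int)])
    io.callback = completion.callback
    io.submit()
    completion.wait()

    if completion.results["err"] == 0 and direction == IoDir.READ:
        # copy the read payload back into the caller's buffer (assumed Data.handle layout)
        memmove(cast(data, c_void_p).value + offset, io_data.handle, size)

    return completion.results["err"]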
Example #10
def test_flush_after_mngmt(pyocf_ctx):
    """
    Check whether the underlying volumes' volatile caches (VC) are flushed after management operations
    """
    block_size = 4096

    data = bytes(block_size)

    cache_device = FlushValVolume(Size.from_MiB(50))
    core_device = FlushValVolume(Size.from_MiB(50))

    # after start cache VC must be cleared
    cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WT)
    assert cache_device.flush_last

    # adding core must flush VC
    core = Core.using_device(core_device)
    cache.add_core(core)
    assert cache_device.flush_last

    vol = CoreVolume(core, open=True)
    queue = cache.get_default_queue()

    # WT I/O to write data to core and cache VC
    io_to_exp_obj(vol, queue, block_size * 0, block_size, data, 0, IoDir.WRITE,
                  0)

    # WB I/O to produce dirty cachelines in CAS
    cache.change_cache_mode(CacheMode.WB)
    io_to_exp_obj(vol, queue, block_size * 1, block_size, data, 0, IoDir.WRITE,
                  0)

    # after cache flush VCs are expected to be cleared
    cache.flush()
    assert cache_device.flush_last
    assert core_device.flush_last

    # I/O to write data to cache device VC
    io_to_exp_obj(vol, queue, block_size * 0, block_size, data, 0, IoDir.WRITE,
                  0)

    # cache save must flush VC
    cache.save()
    assert cache_device.flush_last

    # I/O to write data to cache device VC
    io_to_exp_obj(vol, queue, block_size * 0, block_size, data, 0, IoDir.WRITE,
                  0)

    # cache stop must flush VC
    cache.stop()
    assert cache_device.flush_last
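
Similarly, FlushValVolume is not shown. A minimal sketch, assuming RamVolume exposes submit_io and submit_flush hooks (an assumption), that records whether the last request seen was a flush:

class FlushValVolume(RamVolume):
    def __init__(self, size):
        self.flush_last = False
        super().__init__(size)

    def submit_io(self, io):
        # any regular I/O clears the "last request was a flush" flag
        self.flush_last = False
        super().submit_io(io)

    def submit_flush(self, flush):
        self.flush_last = True
        super().submit_flush(flush)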
Example #11
def prepare_cache_and_core(core_size: Size,
                           cache_size: Size = Size.from_MiB(50)):
    cache_device = RamVolume(cache_size)
    core_device = RamVolume(core_size)

    cache = Cache.start_on_device(cache_device)
    core = Core.using_device(core_device)

    cache.add_core(core)
    vol = CoreVolume(core, open=True)
    queue = cache.get_default_queue()

    return vol, queue
Example #12
def test_stop(pyocf_ctx, mode: CacheMode, cls: CacheLineSize,
              with_flush: bool):
    """Stopping cache.
    Check if the cache is stopped properly in different modes, with or without a preceding flush operation.
    """

    cache_device = RamVolume(Size.from_MiB(50))
    core_device = RamVolume(Size.from_MiB(5))
    cache = Cache.start_on_device(cache_device,
                                  cache_mode=mode,
                                  cache_line_size=cls)
    core = Core.using_device(core_device)

    cache.add_core(core)
    front_vol = CoreVolume(core, open=True)
    queue = cache.get_default_queue()

    cls_no = 10

    run_io_and_cache_data_if_possible(core, mode, cls, cls_no)

    stats = cache.get_stats()
    assert int(stats["conf"]["dirty"]) == (cls_no if mode.lazy_write() else 0),\
        "Dirty data before MD5"

    md5_exported_core = front_vol.md5()

    if with_flush:
        cache.flush()
    cache.stop()

    if mode.lazy_write() and not with_flush:
        assert core_device.md5() != md5_exported_core, \
            "MD5 check: core device vs exported object with dirty data"
    else:
        assert core_device.md5() == md5_exported_core, \
            "MD5 check: core device vs exported object with clean data"
Example #13
def test_simple_wt_write(pyocf_ctx):
    cache_device = RamVolume(S.from_MiB(50))
    core_device = RamVolume(S.from_MiB(50))

    cache = Cache.start_on_device(cache_device)
    core = Core.using_device(core_device)
    queue = cache.get_default_queue()

    cache.add_core(core)
    vol = CoreVolume(core, open=True)

    cache_device.reset_stats()
    core_device.reset_stats()

    r = Rio().target(vol).readwrite(ReadWrite.WRITE).size(
        S.from_sector(1)).run([queue])
    assert cache_device.get_stats()[IoDir.WRITE] == 1
    cache.settle()
    stats = cache.get_stats()
    assert stats["req"]["wr_full_misses"]["value"] == 1
    assert stats["usage"]["occupancy"]["value"] == 1

    assert vol.md5() == core_device.md5()
    cache.stop()
Example #14
def test_start_read_first_and_check_mode(pyocf_ctx, mode: CacheMode,
                                         cls: CacheLineSize):
    """Starting cache in different modes with different cache line sizes.
    After start check proper cache mode behaviour, starting with read operation.
    """

    cache_device = RamVolume(Size.from_MiB(50))
    core_device = RamVolume(Size.from_MiB(5))
    cache = Cache.start_on_device(cache_device,
                                  cache_mode=mode,
                                  cache_line_size=cls)
    core = Core.using_device(core_device)

    cache.add_core(core)
    front_vol = CoreVolume(core, open=True)
    bottom_vol = core.get_volume()
    queue = cache.get_default_queue()

    logger.info("[STAGE] Initial write to core device")
    test_data = Data.from_string("This is test data")
    io_to_core(bottom_vol, queue, test_data, Size.from_sector(1).B)

    cache_device.reset_stats()
    core_device.reset_stats()

    logger.info("[STAGE] Initial read from exported object")
    io_from_exported_object(front_vol, queue, test_data.size,
                            Size.from_sector(1).B)
    check_stats_read_empty(core, mode, cls)

    logger.info("[STAGE] Write to exported object after initial read")
    cache_device.reset_stats()
    core_device.reset_stats()

    test_data = Data.from_string("Changed test data")

    io_to_core(front_vol, queue, test_data, Size.from_sector(1).B)

    check_stats_write_after_read(core, mode, cls, True)

    logger.info("[STAGE] Read from exported object after write")
    io_from_exported_object(front_vol, queue, test_data.size,
                            Size.from_sector(1).B)
    check_stats_read_after_write(core, mode, cls)

    check_md5_sums(core, mode)
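
io_to_core and io_from_exported_object are not shown in this excerpt. Minimal sketches following the same Io/OcfCompletion pattern as the send_io sketch in Example #1 (assert messages are assumptions):

def io_to_core(vol, queue, data, offset):
    # Write `data` at `offset` through the given volume and wait for completion.
    io = vol.new_io(queue, offset, data.size, IoDir.WRITE, 0, 0)
    io.set_data(data)

    completion = OcfCompletion([("err", c_int)])
    io.callback = completion.callback
    io.submit()
    completion.wait()

    assert completion.results["err"] == 0, "Write to volume completion"


def io_from_exported_object(vol, queue, buffer_size, offset):
    # Read `buffer_size` bytes at `offset` from the exported object and return the buffer.
    read_buffer = Data(buffer_size)
    io = vol.new_io(queue, offset, read_buffer.size, IoDir.READ, 0, 0)
    io.set_data(read_buffer)

    completion = OcfCompletion([("err", c_int)])
    io.callback = completion.callback
    io.submit()
    completion.wait()

    assert completion.results["err"] == 0, "Read from exported object completion"
    return read_buffer
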
def check_func(cache, error_triggered):
    # Nested helper (fragment) from a surprise-shutdown management test; the "core1"/"core2"
    # cores and the "dev2" uuid come from the enclosing test's setup.
    stats = cache.get_stats()
    assert stats["conf"]["core_count"] == (0 if error_triggered else 1)

    with pytest.raises(OcfError):
        core1 = cache.get_core_by_name("core1")

    core2 = None
    if error_triggered:
        with pytest.raises(OcfError):
            core2 = cache.get_core_by_name("core2")
    else:
        core2 = cache.get_core_by_name("core2")

    if core2 is not None:
        vol2 = CoreVolume(core2, open=True)
        assert core2.device.uuid == "dev2"
        assert (
            ocf_read(vol2, cache.get_default_queue(), mngmt_op_surprise_shutdown_test_io_offset)
            == VOLUME_POISON
        )
Example #16
def test_load_cache_with_cores(pyocf_ctx, open_cores):
    cache_device = RamVolume(S.from_MiB(40))
    core_device = RamVolume(S.from_MiB(40))

    cache = Cache.start_on_device(cache_device)
    core = Core.using_device(core_device, name="test_core")

    cache.add_core(core)
    vol = CoreVolume(core, open=True)

    write_data = Data.from_string("This is test data")
    io = vol.new_io(cache.get_default_queue(),
                    S.from_sector(3).B, write_data.size, IoDir.WRITE, 0, 0)
    io.set_data(write_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    cache.stop()

    cache = Cache.load_from_device(cache_device, open_cores=open_cores)
    if not open_cores:
        cache.add_core(core, try_add=True)
    else:
        core = cache.get_core_by_name("test_core")

    vol = CoreVolume(core, open=True)

    read_data = Data(write_data.size)
    io = vol.new_io(cache.get_default_queue(),
                    S.from_sector(3).B, read_data.size, IoDir.READ, 0, 0)
    io.set_data(read_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    assert read_data.md5() == write_data.md5()
    assert vol.md5() == core_device.md5()
Example #17
def test_remove_dirty_no_flush(pyocf_ctx, cache_mode, cls):
    # Start cache device
    cache_device = RamVolume(S.from_MiB(50))
    cache = Cache.start_on_device(cache_device,
                                  cache_mode=cache_mode,
                                  cache_line_size=cls)

    # Create core device
    core_device = RamVolume(S.from_MiB(10))
    core = Core.using_device(core_device)
    cache.add_core(core)

    vol = CoreVolume(core, open=True)
    queue = core.cache.get_default_queue()

    # Prepare data
    core_size = core.get_stats()["size"]
    data = Data(core_size.B)

    _io_to_core(vol, queue, data)

    # Remove core from cache
    cache.remove_core(core)
Example #18
def test_secure_erase_simple_io_read_misses(cache_mode):
    """
        Perform simple IO which will trigger read misses, which in turn should
        trigger backfill. Track all the data locked/copied for backfill and make
        sure OCF calls secure erase and unlock on them.
    """
    ctx = OcfCtx(
        OcfLib.getInstance(),
        b"Security tests ctx",
        DefaultLogger(LogLevel.WARN),
        DataCopyTracer,
        Cleaner,
    )

    ctx.register_volume_type(RamVolume)

    cache_device = RamVolume(S.from_MiB(50))
    cache = Cache.start_on_device(cache_device, cache_mode=cache_mode)

    core_device = RamVolume(S.from_MiB(50))
    core = Core.using_device(core_device)
    cache.add_core(core)
    vol = CoreVolume(core, open=True)
    queue = cache.get_default_queue()

    write_data = DataCopyTracer(S.from_sector(1))
    io = vol.new_io(
        queue,
        S.from_sector(1).B,
        write_data.size,
        IoDir.WRITE,
        0,
        0,
    )
    io.set_data(write_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    cmpls = []
    for i in range(100):
        read_data = DataCopyTracer(S.from_sector(1))
        io = vol.new_io(
            queue,
            i * S.from_sector(1).B,
            read_data.size,
            IoDir.READ,
            0,
            0,
        )
        io.set_data(read_data)

        cmpl = OcfCompletion([("err", c_int)])
        io.callback = cmpl.callback
        cmpls.append(cmpl)
        io.submit()

    for c in cmpls:
        c.wait()

    write_data = DataCopyTracer.from_string("TEST DATA" * 100)
    io = vol.new_io(queue, S.from_sector(1).B, write_data.size, IoDir.WRITE, 0,
                    0)
    io.set_data(write_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    stats = cache.get_stats()

    ctx.exit()

    assert (len(DataCopyTracer.needs_erase) == 0
            ), "Not all locked Data instances were secure erased!"
    assert (len(DataCopyTracer.locked_instances) == 0
            ), "Not all locked Data instances were unlocked!"
    assert (stats["req"]["rd_partial_misses"]["value"] +
            stats["req"]["rd_full_misses"]["value"]) > 0
Example #19
def test_seq_cutoff_max_streams(pyocf_ctx):
    """
    Test the number of sequential streams tracked by OCF.

    MAX_STREAMS is the maximum number of streams which OCF is able to track.

    1. Issue MAX_STREAMS requests (writes or reads) to the cache, each 1 sector shorter than
        the seq cutoff threshold
    2. Issue MAX_STREAMS-1 requests continuing the streams from 1. to surpass the threshold and
        check if cutoff was triggered (requests used the PT engine)
    3. Issue a single request to a stream not used in 1. or 2. and check that it has been
        handled by the cache
    4. Issue a single request to the stream least recently used in 1. and 2. and check that it
        has been handled by the cache. It should no longer be tracked by OCF, because the
        request in step 3 overflowed the OCF tracking structure
    """
    MAX_STREAMS = 128
    TEST_STREAMS = MAX_STREAMS + 1  # Number of streams used by test - one more than OCF can track
    core_size = Size.from_MiB(200)
    threshold = Size.from_KiB(4)

    streams = [
        Stream(
            last=Size((stream_no * int(core_size) // TEST_STREAMS),
                      sector_aligned=True),
            length=Size(0),
            direction=choice(list(IoDir)),
        ) for stream_no in range(TEST_STREAMS)
    ]  # Generate MAX_STREAMS + 1 non-overlapping streams

    # Remove one stream - this is the one we are going to use to overflow OCF tracking structure
    # in step 3
    non_active_stream = choice(streams)
    streams.remove(non_active_stream)

    cache = Cache.start_on_device(RamVolume(Size.from_MiB(200)),
                                  cache_mode=CacheMode.WT)
    core = Core.using_device(RamVolume(core_size),
                             seq_cutoff_promotion_count=1)

    cache.add_core(core)
    vol = CoreVolume(core, open=True)
    queue = cache.get_default_queue()

    cache.set_seq_cut_off_policy(SeqCutOffPolicy.ALWAYS)
    cache.set_seq_cut_off_threshold(threshold)

    # STEP 1
    shuffle(streams)
    io_size = threshold - Size.from_sector(1)
    io_to_streams(vol, queue, streams, io_size)

    stats = cache.get_stats()
    assert (stats["req"]["serviced"]["value"] == stats["req"]["total"]["value"]
            == len(streams)), "All request should be serviced - no cutoff"

    old_serviced = len(streams)

    # STEP 2
    lru_stream = streams[0]
    streams.remove(lru_stream)

    shuffle(streams)
    io_to_streams(vol, queue, streams, Size.from_sector(1))

    stats = cache.get_stats()
    assert (
        stats["req"]["serviced"]["value"] == old_serviced
    ), "Serviced requests stat should not increase - cutoff engaged for all"
    assert stats["req"]["wr_pt"]["value"] + stats["req"]["rd_pt"][
        "value"] == len(
            streams
        ), "All streams should be handled in PT - cutoff engaged for all streams"

    # STEP 3
    io_to_streams(vol, queue, [non_active_stream], Size.from_sector(1))

    stats = cache.get_stats()
    assert (
        stats["req"]["serviced"]["value"] == old_serviced + 1
    ), "This request should be serviced by cache - no cutoff for inactive stream"

    # STEP 4
    io_to_streams(vol, queue, [lru_stream], Size.from_sector(1))

    stats = cache.get_stats()
    assert (
        stats["req"]["serviced"]["value"] == old_serviced + 2
    ), "This request should be serviced by cache - lru_stream should be no longer tracked"
def prepare_func(cache):
    # Nested helper (fragment): `core` and `io_offset` come from the enclosing test's scope.
    cache.add_core(core)
    vol = CoreVolume(core, open=True)
    ocf_write(vol, cache.get_default_queue(), 0xAA, io_offset)
Example #21
def test_read_data_consistency(pyocf_ctx, cacheline_size, cache_mode,
                               rand_seed):
    CACHELINE_COUNT = 9
    SECTOR_SIZE = Size.from_sector(1).B
    CLS = cacheline_size // SECTOR_SIZE
    WORKSET_SIZE = CACHELINE_COUNT * cacheline_size
    WORKSET_OFFSET = 128 * cacheline_size
    SECTOR_COUNT = int(WORKSET_SIZE / SECTOR_SIZE)
    ITERATION_COUNT = 50

    random.seed(rand_seed)

    # start sector for each region (positions of '*' on the above diagram)
    region_start = ([0, 3 * CLS, 4 * CLS - 1] +
                    [4 * CLS + i
                     for i in range(CLS)] + [5 * CLS, 5 * CLS + 1, 6 * CLS])
    num_regions = len(region_start)
    # possible IO start sectors for test iteration  (positions of '>' on the above diagram)
    start_sec = [0, CLS, 2 * CLS, 3 * CLS, 4 * CLS - 2, 4 * CLS - 1
                 ] + [4 * CLS + i for i in range(CLS)]
    # possible IO end sectors for test iteration (positions of '<' on the above diagram)
    end_sec = ([3 * CLS - 1] + [4 * CLS + i for i in range(CLS)] + [
        5 * CLS, 5 * CLS + 1, 6 * CLS - 1, 7 * CLS - 1, 8 * CLS - 1,
        9 * CLS - 1
    ])

    data = {}
    # memset n-th sector of core data with n << 2
    data[SectorStatus.INVALID] = bytes([
        get_byte(((x // SECTOR_SIZE) << 2) + 0, x % 4)
        for x in range(WORKSET_SIZE)
    ])
    # memset n-th sector of clean data with n << 2 + 1
    data[SectorStatus.CLEAN] = bytes([
        get_byte(((x // SECTOR_SIZE) << 2) + 1, x % 4)
        for x in range(WORKSET_SIZE)
    ])
    # memset n-th sector of dirty data with n << 2 + 2
    data[SectorStatus.DIRTY] = bytes([
        get_byte(((x // SECTOR_SIZE) << 2) + 2, x % 4)
        for x in range(WORKSET_SIZE)
    ])

    result_b = bytes(WORKSET_SIZE)

    cache_device = RamVolume(Size.from_MiB(50))
    core_device = RamVolume(Size.from_MiB(50))

    cache = Cache.start_on_device(cache_device,
                                  cache_mode=CacheMode.WO,
                                  cache_line_size=cacheline_size)

    core = Core.using_device(core_device)
    cache.add_core(core)
    queue = cache.get_default_queue()
    vol = CoreVolume(core, open=True)

    insert_order = list(range(CACHELINE_COUNT))

    # set fixed generated sector statuses
    region_statuses = [
        [I, I, I] + [I for i in range(CLS)] + [I, I, I],
        [I, I, I] + [D for i in range(CLS)] + [I, I, I],
        [I, I, I] + [C for i in range(CLS)] + [I, I, I],
        [I, I, I] + [D for i in range(CLS // 2 - 1)] + [I] +
        [D for i in range(CLS // 2)] + [I, I, I],
        [I, I, I] + [D for i in range(CLS // 2 - 1)] + [I, I] +
        [D for i in range(CLS // 2 - 1)] + [I, I, I],
        [I, I, I] + [D for i in range(CLS // 2 - 2)] + [I, I, D, C] +
        [D for i in range(CLS // 2 - 2)] + [I, I, I],
        [I, I, D] + [D for i in range(CLS)] + [D, I, I],
        [I, I, D] + [D for i in range(CLS // 2 - 1)] + [I] +
        [D for i in range(CLS // 2)] + [D, I, I],
    ]

    # add randomly generated sector statuses
    for _ in range(ITERATION_COUNT - len(region_statuses)):
        region_statuses.append(
            [random.choice(list(SectorStatus)) for _ in range(num_regions)])

    # iterate over generated status combinations and perform the test
    for region_state in region_statuses:
        # invalidate all cachelines and write the INVALID data pattern to the core
        cache.change_cache_mode(cache_mode=CacheMode.PT)
        io_to_exp_obj(
            vol,
            queue,
            WORKSET_OFFSET,
            len(data[SectorStatus.INVALID]),
            data[SectorStatus.INVALID],
            0,
            IoDir.WRITE,
        )

        # randomize cacheline insertion order to exercise different paths
        # with regard to the contiguity of cache I/O physical addresses
        random.shuffle(insert_order)
        sectors = [
            insert_order[i // CLS] * CLS + (i % CLS)
            for i in range(SECTOR_COUNT)
        ]

        # insert clean sectors - iterate over cachelines in @insert_order order
        cache.change_cache_mode(cache_mode=CacheMode.WT)
        for sec in sectors:
            region = sector_to_region(sec, region_start)
            if region_state[region] != SectorStatus.INVALID:
                io_to_exp_obj(
                    vol,
                    queue,
                    WORKSET_OFFSET + SECTOR_SIZE * sec,
                    SECTOR_SIZE,
                    data[SectorStatus.CLEAN],
                    sec * SECTOR_SIZE,
                    IoDir.WRITE,
                )

        # write dirty sectors
        cache.change_cache_mode(cache_mode=CacheMode.WB)
        for sec in sectors:
            region = sector_to_region(sec, region_start)
            if region_state[region] == SectorStatus.DIRTY:
                io_to_exp_obj(
                    vol,
                    queue,
                    WORKSET_OFFSET + SECTOR_SIZE * sec,
                    SECTOR_SIZE,
                    data[SectorStatus.DIRTY],
                    sec * SECTOR_SIZE,
                    IoDir.WRITE,
                )

        cache.change_cache_mode(cache_mode=cache_mode)

        core_device.reset_stats()

        # get up to 32 randomly selected pairs of (start,end) sectors
        # 32 is enough to cover all combinations for 4K and 8K cacheline size
        io_ranges = [(s, e) for s, e in product(start_sec, end_sec) if s < e]
        random.shuffle(io_ranges)
        io_ranges = io_ranges[:32]

        # run the test for each selected IO range for currently set up region status
        for start, end in io_ranges:
            print_test_case(region_start, region_state, start, end,
                            SECTOR_COUNT, CLS)

            # issue read
            START = start * SECTOR_SIZE
            END = end * SECTOR_SIZE
            size = (end - start + 1) * SECTOR_SIZE
            assert 0 == io_to_exp_obj(
                vol, queue, WORKSET_OFFSET + START, size, result_b, START,
                IoDir.READ
            ), "error reading in {}: region_state={}, start={}, end={}, insert_order={}".format(
                cache_mode, region_state, start, end, insert_order)

            # verify read data
            for sec in range(start, end + 1):
                # just check the first 32 bits of the sector (this is the size of the fill pattern)
                region = sector_to_region(sec, region_start)
                start_byte = sec * SECTOR_SIZE
                expected_data = bytes_to_uint32(
                    data[region_state[region]][start_byte + 0],
                    data[region_state[region]][start_byte + 1],
                    data[region_state[region]][start_byte + 2],
                    data[region_state[region]][start_byte + 3],
                )
                actual_data = bytes_to_uint32(
                    result_b[start_byte + 0],
                    result_b[start_byte + 1],
                    result_b[start_byte + 2],
                    result_b[start_byte + 3],
                )

                assert (
                    actual_data == expected_data
                ), "unexpected data in sector {}, region_state={}, start={}, end={}, insert_order={}\n".format(
                    sec, region_state, start, end, insert_order)

            if cache_mode == CacheMode.WO:
                # WO is not supposed to clean dirty data
                assert (
                    core_device.get_stats()[IoDir.WRITE] == 0
                ), "unexpected write to core device, region_state={}, start={}, end={}, insert_order = {}\n".format(
                    region_state, start, end, insert_order)
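
The small helpers used above (get_byte, bytes_to_uint32, sector_to_region) are not included in this excerpt. Minimal sketches consistent with how they are called, assuming a little-endian 32-bit fill pattern:

def get_byte(number, byte):
    # n-th little-endian byte of a 32-bit fill pattern
    return (number >> (byte * 8)) & 0xFF


def bytes_to_uint32(b0, b1, b2, b3):
    # reassemble four little-endian bytes into a 32-bit value
    return (b3 << 24) | (b2 << 16) | (b1 << 8) | b0


def sector_to_region(sector, region_start):
    # index of the region with the greatest start sector that is <= `sector`
    i = len(region_start) - 1
    while region_start[i] > sector:
        i -= 1
    return i
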
def test_surprise_shutdown_cache_reinit(pyocf_ctx):
    core_device = RamVolume(S.from_MiB(10))

    error_io = {IoDir.WRITE: 0}

    io_offset = mngmt_op_surprise_shutdown_test_io_offset

    error_triggered = True
    while error_triggered:
        # Start cache device without error injection
        device = ErrorDevice(
            mngmt_op_surprise_shutdown_test_cache_size, error_seq_no=error_io, armed=False
        )

        # start WB
        cache = Cache.start_on_device(device, cache_mode=CacheMode.WB)
        core = Core(device=core_device)
        cache.add_core(core)
        vol = CoreVolume(core, open=True)
        queue = cache.get_default_queue()

        # insert dirty cacheline
        ocf_write(vol, queue, 0xAA, io_offset)

        cache.stop()

        assert core_device.get_bytes()[io_offset] == VOLUME_POISON

        # start error injection
        device.arm()

        # power failure during cache re-initialization
        try:
            # sets force = True by default
            cache = Cache.start_on_device(device, cache_mode=CacheMode.WB)
            status = OcfErrorCode.OCF_OK
        except OcfError as ex:
            status = ex.error_code
            cache = None

        error_triggered = device.error_triggered()
        assert error_triggered == (status == OcfErrorCode.OCF_ERR_WRITE_CACHE)

        if cache:
            with pytest.raises(OcfError) as ex:
                cache.stop()
            assert ex.value.error_code == OcfErrorCode.OCF_ERR_WRITE_CACHE

        device.disarm()

        cache = None
        status = OcfErrorCode.OCF_OK
        try:
            cache = Cache.load_from_device(device)
        except OcfError as ex:
            status = ex.error_code

        if not cache:
            assert status == OcfErrorCode.OCF_ERR_NO_METADATA
        else:
            stats = cache.get_stats()
            if stats["conf"]["core_count"] == 0:
                assert stats["usage"]["occupancy"]["value"] == 0
                cache.add_core(core)
                vol = CoreVolume(core, open=True)
                assert ocf_read(vol, cache.get_default_queue(), io_offset) == VOLUME_POISON

            cache.stop()

        error_io[IoDir.WRITE] += 1
Example #23
def test_evict_overflown_pinned(pyocf_ctx, cls: CacheLineSize):
    """ Verify if overflown pinned ioclass is evicted """
    cache_device = RamVolume(Size.from_MiB(50))
    core_device = RamVolume(Size.from_MiB(100))
    cache = Cache.start_on_device(
        cache_device, cache_mode=CacheMode.WT, cache_line_size=cls
    )
    core = Core.using_device(core_device)
    cache.add_core(core)
    vol = CoreVolume(core, open=True)

    test_ioclass_id = 1
    pinned_ioclass_id = 2
    pinned_ioclass_max_occupancy = 10

    cache.configure_partition(
        part_id=test_ioclass_id,
        name="default_ioclass",
        max_size=100,
        priority=1,
    )
    cache.configure_partition(
        part_id=pinned_ioclass_id,
        name="pinned_ioclass",
        max_size=pinned_ioclass_max_occupancy,
        priority=-1,
    )

    cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)

    cache_size = cache.get_stats()["conf"]["size"]

    data = Data(4096)

    # Populate cache with data
    for i in range(cache_size.blocks_4k):
        send_io(core, data, i * 4096, test_ioclass_id)

    part_current_size = CacheLines(
        cache.get_partition_info(part_id=test_ioclass_id)["_curr_size"], cls
    )
    assert isclose(
        part_current_size.blocks_4k, cache_size.blocks_4k, abs_tol=Size(cls).blocks_4k
    ), "Failed to populate the default partition"

    # Repart - force overflow of second partition occupancy limit
    pinned_double_size = ceil(
        (cache_size.blocks_4k * pinned_ioclass_max_occupancy * 2) / 100
    )
    for i in range(pinned_double_size):
        send_io(core, data, i * 4096, pinned_ioclass_id)

    part_current_size = CacheLines(
        cache.get_partition_info(part_id=pinned_ioclass_id)["_curr_size"], cls
    )
    assert isclose(
        part_current_size.blocks_4k, pinned_double_size, abs_tol=Size(cls).blocks_4k
    ), "Occupancy of pinned ioclass doesn't match expected value"

    # Trigger IO to the default ioclass - force eviction from the overflown ioclass
    for i in range(cache_size.blocks_4k):
        send_io(core, data, (cache_size.blocks_4k + i) * 4096, test_ioclass_id)

    part_current_size = CacheLines(
        cache.get_partition_info(part_id=pinned_ioclass_id)["_curr_size"], cls
    )
    assert isclose(
        part_current_size.blocks_4k,
        ceil(cache_size.blocks_4k * 0.1),
        abs_tol=Size(cls).blocks_4k,
    ), "Overflown part has not been evicted"