Example #1
def test_neg_write_offset_outside_of_device(pyocf_ctx, c_int_randomize):
    """
        Check that write operations are blocked when
        the IO offset lies outside the device range
    """

    core = prepare_cache_and_core(Size.from_MiB(2))
    data = Data(int(Size.from_KiB(1)))
    completion = io_operation(core, data, IoDir.WRITE, offset=c_int_randomize)

    if 0 <= c_int_randomize <= int(Size.from_MiB(2)) - int(Size.from_KiB(1)):
        assert completion.results["err"] == 0
    else:
        assert completion.results["err"] != 0
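
Note: the helpers prepare_cache_and_core and io_operation are not shown on this page. A minimal sketch, reconstructed only from the calls visible in Examples #6 and #8 (the upstream helpers may differ in details), could look like this:

def prepare_cache_and_core(core_size: Size):
    # Hypothetical reconstruction: start a cache and attach a core of the given size
    cache_device = Volume(Size.from_MiB(30))
    core_device = Volume(core_size)
    cache = Cache.start_on_device(cache_device)
    core = Core.using_device(core_device)
    cache.add_core(core)
    return core


def io_operation(core, data, io_direction, offset=0):
    # Hypothetical reconstruction: submit a single IO and wait for its completion
    io = core.new_io(core.cache.get_default_queue(), offset, data.size,
                     io_direction, 0, 0)
    io.set_data(data)

    completion = OcfCompletion([("err", c_int)])
    io.callback = completion.callback
    io.submit()
    completion.wait()
    return completion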
Example #2
def test_neg_read_too_far(pyocf_ctx, c_uint16_randomize):
    """
        Check that a read which would normally fit on the exported object is
        blocked when the offset places the IO beyond the exported device's end
    """

    limited_size = c_uint16_randomize % (int(Size.from_KiB(4)) + 1)
    core = prepare_cache_and_core(Size.from_MiB(4))
    data = Data(int(Size.from_KiB(limited_size)))
    completion = io_operation(core, data, IoDir.READ, offset=Size.from_MiB(3))

    if limited_size > 1024:
        assert completion.results["err"] != 0
    else:
        assert completion.results["err"] == 0
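
The 1024 KiB boundary asserted above follows from simple geometry; a worked check (not part of the original test):

# Why 1024 KiB is the pass/fail boundary in Example #2:
core_size_kib = 4 * 1024                      # 4 MiB exported object
io_offset_kib = 3 * 1024                      # read starts at the 3 MiB mark
max_fit_kib = core_size_kib - io_offset_kib   # = 1024 KiB
# Reads longer than max_fit_kib would cross the device end and must be rejected.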
Example #3
def test_neg_write_too_far(pyocf_ctx, c_uint16_randomize):
    """
        Check that a write which would normally fit on the exported object is
        blocked when the offset places the IO beyond the exported device's end
    """

    limited_size = c_uint16_randomize % (int(Size.from_KiB(4)) + 1)
    vol, queue = prepare_cache_and_core(Size.from_MiB(4))
    data = Data(int(Size.from_KiB(limited_size)))
    completion = io_operation(vol, queue, data, IoDir.WRITE,
                              int(Size.from_MiB(3)))

    if limited_size > 1024:
        assert completion.results["err"] != 0
    else:
        assert completion.results["err"] == 0
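
Examples #3 and #7 use a newer pyocf API in which prepare_cache_and_core returns a (volume, queue) pair and the IO is created on the volume. A hedged sketch of the matching io_operation variant, modeled on the new_io call in Example #7 (parameter names and defaults are assumptions):

def io_operation(vol, queue, data, io_direction, offset=0):
    # Hypothetical reconstruction for the (volume, queue) flavor of the API
    io = vol.new_io(queue, offset, data.size, io_direction, 0, 0)
    io.set_data(data)

    completion = OcfCompletion([("err", c_int)])
    io.callback = completion.callback
    io.submit()
    completion.wait()
    return completion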
Example #4
def test_write_size_greater_than_cache(pyocf_ctx, mode: CacheMode,
                                       cls: CacheLineSize):
    """Test if eviction does not occur when IO greater than cache size is submitted.
    """
    cache_device = Volume(
        Size.from_MiB(20))  # this gives about 1.375 MiB actual caching space

    core_device = Volume(Size.from_MiB(5))
    cache = Cache.start_on_device(cache_device,
                                  cache_mode=mode,
                                  cache_line_size=cls)
    core_exported = Core.using_device(core_device)
    cache.add_core(core_exported)
    cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)

    valid_io_size = Size.from_KiB(512)
    test_data = Data(valid_io_size)
    send_io(core_exported, test_data)

    stats = core_exported.cache.get_stats()
    assert stats["usage"]["occupancy"]["value"] == (valid_io_size.B / Size.from_KiB(4).B),\
        "Occupancy after first IO"
    prev_writes_to_core = stats["block"]["core_volume_wr"]["value"]

    # Anything below 5 MiB is a valid size (less than core device size)
    # Writing over 1.375 MiB in this case should go directly to core and shouldn't trigger eviction
    io_size_bigger_than_cache = Size.from_MiB(2)
    test_data = Data(io_size_bigger_than_cache)
    send_io(core_exported, test_data)

    stats = core_exported.cache.get_stats()

    # Writes from IO greater than cache size should go directly to core
    # Writes to core should equal the following:
    # Previous writes to core + size written + size cleaned (reads from cache)
    assert stats["block"]["core_volume_wr"]["value"] == \
        stats["block"]["cache_volume_rd"]["value"] + \
        prev_writes_to_core + io_size_bigger_than_cache.B / Size.from_KiB(4).B, \
        "Writes to core after second IO"

    # Occupancy shouldn't change (no eviction)
    assert stats["usage"]["occupancy"]["value"] == (valid_io_size.B / Size.from_KiB(4).B),\
        "Occupancy after second IO"
Example #5
def test_write_size_greater_than_cache(pyocf_ctx, mode: CacheMode,
                                       cls: CacheLineSize):
    """Test if eviction does not occur when IO greater than cache size is submitted.
    """
    cache_device = Volume(Size.from_MiB(20))

    core_device = Volume(Size.from_MiB(5))
    cache = Cache.start_on_device(cache_device,
                                  cache_mode=mode,
                                  cache_line_size=cls)
    cache_size = cache.get_stats()['conf']['size']
    core_exported = Core.using_device(core_device)
    cache.add_core(core_exported)
    cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)

    valid_io_size = Size.from_B(cache_size.B // 2)
    test_data = Data(valid_io_size)
    send_io(core_exported, test_data)

    stats = core_exported.cache.get_stats()
    first_block_sts = stats['block']
    first_usage_sts = stats['usage']
    pt_writes_first = stats['req']['wr_pt']
    assert stats["usage"]["occupancy"]["value"] == (valid_io_size.B / Size.from_KiB(4).B),\
        "Occupancy after first IO"
    prev_writes_to_core = stats["block"]["core_volume_wr"]["value"]

    # Anything below 5 MiB is a valid size (less than core device size)
    # Writing over cache size (to the offset above first io) in this case should go
    # directly to core and shouldn't trigger eviction
    io_size_bigger_than_cache = Size.from_MiB(2)
    io_offset = valid_io_size
    test_data = Data(io_size_bigger_than_cache)
    send_io(core_exported, test_data, io_offset)

    if mode is not CacheMode.WT:
        # Flush first write
        cache.flush()
    stats = core_exported.cache.get_stats()
    second_block_sts = stats['block']
    second_usage_sts = stats['usage']
    pt_writes_second = stats['req']['wr_pt']

    # Second write shouldn't affect cache and should go directly to core.
    # Cache occupancy shouldn't change
    # Second IO should go in PT
    assert first_usage_sts['occupancy'] == \
        second_usage_sts['occupancy']
    assert pt_writes_first['value'] == 0
    assert pt_writes_second['value'] == 1
    assert second_block_sts['cache_volume_wr'][
        'value'] == valid_io_size.blocks_4k
    assert second_block_sts['core_volume_wr']['value'] == valid_io_size.blocks_4k + \
        io_size_bigger_than_cache.blocks_4k
Example #6
def test_neg_offset_unaligned(pyocf_ctx, c_int_randomize):
    """
        Check that write operations are blocked when
        the IO offset is not aligned
    """

    core = prepare_cache_and_core(Size.from_MiB(2))
    data = Data(int(Size.from_KiB(1)))
    if c_int_randomize % 512 != 0:
        with pytest.raises(Exception, match="Failed to create io!"):
            core.new_io(core.cache.get_default_queue(), c_int_randomize,
                        data.size, IoDir.WRITE, 0, 0)
Example #7
def test_neg_offset_unaligned(pyocf_ctx, c_int_randomize):
    """
        Check that write operations are blocked when
        the IO offset is not aligned
    """

    vol, queue = prepare_cache_and_core(Size.from_MiB(2))
    vol = vol.get_front_volume()
    data = Data(int(Size.from_KiB(1)))
    if c_int_randomize % 512 != 0:
        with pytest.raises(Exception):
            vol.new_io(queue, c_int_randomize, data.size, IoDir.WRITE, 0, 0)
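
The c_int_randomize and c_uint16_randomize fixtures feed each test a random value of the corresponding ctypes width. A plausible pytest sketch (the upstream fixtures may draw values differently, e.g. parametrizing over many random samples):

import random

import pytest


@pytest.fixture()
def c_int_randomize():
    # Hypothetical fixture: a random value from the signed 32-bit range
    return random.randint(-(2 ** 31), 2 ** 31 - 1)


@pytest.fixture()
def c_uint16_randomize():
    # Hypothetical fixture: a random value from the unsigned 16-bit range
    return random.randint(0, 2 ** 16 - 1)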
Example #8
def test_secure_erase_simple_io_cleaning():
    """
        Perform simple IO which will trigger WB cleaning. Track all the data buffers
        used by the cleaner (locked) and make sure they are erased and unlocked after use.
    """
    ctx = OcfCtx(
        OcfLib.getInstance(),
        b"Security tests ctx",
        DefaultLogger(LogLevel.WARN),
        DataCopyTracer,
        MetadataUpdater,
        Cleaner,
    )

    ctx.register_volume_type(Volume)

    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WB)

    core_device = Volume(S.from_MiB(100))
    core = Core.using_device(core_device)
    cache.add_core(core)

    cmpls = []
    for i in range(10000):
        read_data = Data(S.from_KiB(120))
        io = core.new_io()
        io.set_data(read_data)
        io.configure(
            (i * 1259) % int(core_device.size), read_data.size, IoDir.WRITE, 0, 0
        )
        io.set_queue(cache.get_default_queue())

        cmpl = OcfCompletion([("err", c_int)])
        io.callback = cmpl.callback
        cmpls.append(cmpl)
        io.submit()

    for c in cmpls:
        c.wait()

    stats = cache.get_stats()

    ctx.exit()

    assert (
        len(DataCopyTracer.needs_erase) == 0
    ), "Not all locked Data instances were secure erased!"
    assert (
        len(DataCopyTracer.locked_instances) == 0
    ), "Not all locked Data instances were unlocked!"
    assert (stats["usage"]["clean"]["value"]) > 0, "Cleaner didn't run!"
Example #9
def test_neg_read_too_long_data(pyocf_ctx, c_uint16_randomize):
    """
        Check that reads larger than the exported object size are properly blocked
    """

    core = prepare_cache_and_core(Size.from_MiB(1))
    data = Data(int(Size.from_KiB(c_uint16_randomize)))
    completion = io_operation(core, data, IoDir.READ)

    if c_uint16_randomize > 1024:
        assert completion.results["err"] != 0
    else:
        assert completion.results["err"] == 0
Example #10
def test_seq_cutoff_max_streams(pyocf_ctx):
    """
    Test number of sequential streams tracked by OCF.

    MAX_STREAMS is the maximum number of streams OCF is able to track.

    1. Issue MAX_STREAMS requests (writes or reads) to the cache, each 1 sector shorter than
        the seq cutoff threshold
    2. Issue MAX_STREAMS-1 requests continuing the streams from 1. to surpass the threshold and
        check that cutoff was triggered (requests used the PT engine)
    3. Issue a single request to a stream not used in 1. or 2. and check that it was handled
        by the cache
    4. Issue a single request to the stream least recently used in 1. and 2. and check that it
        was handled by the cache. It should no longer be tracked by OCF, because the request in
        step 3 overflowed the OCF tracking structure
    """
    MAX_STREAMS = 256
    TEST_STREAMS = MAX_STREAMS + 1  # Number of streams used by test - one more than OCF can track
    core_size = Size.from_MiB(200)
    threshold = Size.from_KiB(4)

    streams = [
        Stream(
            last=Size((stream_no * int(core_size) // TEST_STREAMS),
                      sector_aligned=True),
            length=Size(0),
            direction=choice(list(IoDir)),
        ) for stream_no in range(TEST_STREAMS)
    ]  # Generate MAX_STREAMS + 1 non-overlapping streams

    # Remove one stream - this is the one we will use to overflow the OCF tracking
    # structure in step 3
    non_active_stream = choice(streams)
    streams.remove(non_active_stream)

    cache = Cache.start_on_device(Volume(Size.from_MiB(200)),
                                  cache_mode=CacheMode.WT)
    core = Core.using_device(Volume(core_size))

    cache.add_core(core)

    cache.set_seq_cut_off_policy(SeqCutOffPolicy.ALWAYS)
    cache.set_seq_cut_off_threshold(threshold)

    # STEP 1
    shuffle(streams)
    io_size = threshold - Size.from_sector(1)
    io_to_streams(core, streams, io_size)

    stats = cache.get_stats()
    assert (stats["req"]["serviced"]["value"] == stats["req"]["total"]["value"]
            == len(streams)), "All requests should be serviced - no cutoff"

    old_serviced = len(streams)

    # STEP 2
    lru_stream = streams[0]
    streams.remove(lru_stream)

    shuffle(streams)
    io_to_streams(core, streams, Size.from_sector(1))

    stats = cache.get_stats()
    assert (
        stats["req"]["serviced"]["value"] == old_serviced
    ), "Serviced requests stat should not increase - cutoff engaged for all"
    assert stats["req"]["wr_pt"]["value"] + stats["req"]["rd_pt"][
        "value"] == len(
            streams
        ), "All streams should be handled in PT - cutoff engaged for all streams"

    # STEP 3
    io_to_streams(core, [non_active_stream], Size.from_sector(1))

    stats = cache.get_stats()
    assert (
        stats["req"]["serviced"]["value"] == old_serviced + 1
    ), "This request should be serviced by cache - no cutoff for inactive stream"

    # STEP 4
    io_to_streams(core, [lru_stream], Size.from_sector(1))

    stats = cache.get_stats()
    assert (
        stats["req"]["serviced"]["value"] == old_serviced + 2
    ), "This request should be serviced by cache - lru_stream should be no longer tracked"