Code example #1
File: volume_exp_obj.py Project: Open-CAS/ocf
    def _exp_obj_md5(self, read_size):
        logging.getLogger("pyocf").warning(
            "Reading whole exported object! This disturbs statistics values")

        read_buffer_all = Data(self.parent.device.size)

        read_buffer = Data(read_size)

        position = 0
        while position < read_buffer_all.size:
            io = self.new_io(self.parent.get_default_queue(), position,
                             read_size, IoDir.READ, 0, 0)
            io.set_data(read_buffer)

            cmpl = OcfCompletion([("err", c_int)])
            io.callback = cmpl.callback
            io.submit()
            cmpl.wait()

            if cmpl.results["err"]:
                raise Exception("Error reading whole exported object")

            read_buffer_all.copy(read_buffer, position, 0, read_size)
            position += read_size

        return read_buffer_all.md5()
Code example #2
def test_simple_wt_write(pyocf_ctx):
    cache_device = Volume(S.from_MiB(30))
    core_device = Volume(S.from_MiB(30))

    cache = Cache.start_on_device(cache_device)
    core = Core.using_device(core_device)

    cache.add_core(core)

    cache_device.reset_stats()
    core_device.reset_stats()

    write_data = Data.from_string("This is test data")
    io = core.new_io(cache.get_default_queue(),
                     S.from_sector(1).B, write_data.size, IoDir.WRITE, 0, 0)
    io.set_data(write_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    assert cmpl.results["err"] == 0
    assert cache_device.get_stats()[IoDir.WRITE] == 1
    stats = cache.get_stats()
    assert stats["req"]["wr_full_misses"]["value"] == 1
    assert stats["usage"]["occupancy"]["value"] == 1

    assert core.exp_obj_md5() == core_device.md5()
    cache.stop()
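The snippets on this page omit their imports. As a rough sketch, a test like the one above typically needs the following pyocf test-framework imports (assumed module layout; exact paths and class names such as Volume vs. RamVolume differ between OCF versions):

# Assumed imports for a pyocf functional test; adjust to the OCF version in use.
from ctypes import c_int

from pyocf.types.cache import Cache
from pyocf.types.core import Core
from pyocf.types.data import Data
from pyocf.types.io import IoDir
from pyocf.types.shared import OcfCompletion
from pyocf.types.volume import Volume
from pyocf.utils import Size as S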
Code example #3
File: test_attach_cache.py Project: abysdom/ocf
def __io(io, queue, address, size, data, direction):
    io.set_data(data, 0)
    completion = OcfCompletion([("err", c_int)])
    io.callback = completion.callback
    io.submit()
    completion.wait()
    return int(completion.results["err"])
Code example #4
File: test_add_remove.py Project: mmalewski/ocf
def test_10add_remove_with_io(pyocf_ctx):
    # Start cache device
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device)

    # Create core device
    core_device = Volume(S.from_MiB(10))
    core = Core.using_device(core_device)

    # Add and remove core 10 times in a loop with io in between
    for i in range(0, 10):
        cache.add_core(core)
        stats = cache.get_stats()
        assert stats["conf"]["core_count"] == 1

        write_data = Data.from_string("Test data")
        io = core.new_io()
        io.set_data(write_data)
        io.configure(20, write_data.size, IoDir.WRITE, 0, 0)
        io.set_queue(cache.get_default_queue())

        cmpl = OcfCompletion([("err", c_int)])
        io.callback = cmpl.callback
        io.submit()
        cmpl.wait()

        cache.remove_core(core)
        stats = cache.get_stats()
        assert stats["conf"]["core_count"] == 0
Code example #5
def ocf_write(vol, queue, val, offset):
    data = Data.from_bytes(bytes([val] * 512))
    comp = OcfCompletion([("error", c_int)])
    io = vol.new_io(queue, offset, 512, IoDir.WRITE, 0, 0)
    io.set_data(data)
    io.callback = comp.callback
    io.submit()
    comp.wait()
Code example #6
def ocf_read(vol, queue, offset):
    data = Data(byte_count=512)
    comp = OcfCompletion([("error", c_int)])
    io = vol.new_io(queue, offset, 512, IoDir.READ, 0, 0)
    io.set_data(data)
    io.callback = comp.callback
    io.submit()
    comp.wait()
    return data.get_bytes()[0]
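ocf_write() and ocf_read() above pair naturally into a round-trip check. A minimal sketch, assuming vol and queue come from a started cache/core pair as in the other examples here (ocf_roundtrip_check is an illustrative name, not part of the test suite):

# Hypothetical helper: write a one-byte pattern per 512 B sector, then read it back.
def ocf_roundtrip_check(vol, queue, sectors=4):
    for sector in range(sectors):
        offset = sector * 512
        ocf_write(vol, queue, sector + 1, offset)          # 512 B filled with (sector + 1)
        assert ocf_read(vol, queue, offset) == sector + 1  # first byte of the sector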
Code example #7
File: test_add_remove.py Project: Open-CAS/ocf
def _io_to_core(vol: Volume, queue: Queue, data: Data):
    io = vol.new_io(queue, 0, data.size, IoDir.WRITE, 0, 0)
    io.set_data(data)

    completion = OcfCompletion([("err", c_int)])
    io.callback = completion.callback
    io.submit()
    completion.wait()

    assert completion.results["err"] == 0, "IO to exported object completion"
Code example #8
File: test_negative_io.py Project: mdnfiras/ocf
def io_operation(core: Core, data: Data, io_direction: int, offset: int = 0, io_class: int = 0):
    io = core.new_io(core.cache.get_default_queue(), offset, data.size,
                     io_direction, io_class, 0)
    io.set_data(data)

    completion = OcfCompletion([("err", c_int)])
    io.callback = completion.callback
    io.submit()
    completion.wait()
    return completion
Code example #9
File: test_add_remove.py Project: abysdom/ocf
def _io_to_core(exported_obj: Core, data: Data):
    io = exported_obj.new_io(exported_obj.cache.get_default_queue(), 0,
                             data.size, IoDir.WRITE, 0, 0)
    io.set_data(data)

    completion = OcfCompletion([("err", c_int)])
    io.callback = completion.callback
    io.submit()
    completion.wait()

    assert completion.results["err"] == 0, "IO to exported object completion"
Code example #10
def io_to_core(exported_obj: Core, data: Data, offset: int, to_core_device=False):
    new_io = exported_obj.new_core_io if to_core_device else exported_obj.new_io
    io = new_io(exported_obj.cache.get_default_queue(), offset, data.size,
                IoDir.WRITE, 0, 0)
    io.set_data(data)

    completion = OcfCompletion([("err", c_int)])
    io.callback = completion.callback
    io.submit()
    completion.wait()

    assert completion.results["err"] == 0, "IO to exported object completion"
Code example #11
def fill_cache(cache, fill_ratio):
    """
    Helper to fill cache from LBA 0.
    TODO:
        * make it generic and share across all tests
        * reasonable error handling
    """

    cache_lines = cache.get_stats()["conf"]["size"]

    bytes_to_fill = cache_lines.bytes * fill_ratio
    max_io_size = cache.device.get_max_io_size().bytes

    ios_to_issue = math.floor(bytes_to_fill / max_io_size)

    core = cache.cores[0]
    completions = []
    for i in range(ios_to_issue):
        comp = OcfCompletion([("error", c_int)])
        write_data = Data(max_io_size)
        io = core.new_io(
            cache.get_default_queue(),
            i * max_io_size,
            write_data.size,
            IoDir.WRITE,
            0,
            0,
        )
        io.set_data(write_data)
        io.callback = comp.callback
        completions += [comp]
        io.submit()

    if bytes_to_fill % max_io_size:
        comp = OcfCompletion([("error", c_int)])
        write_data = Data(
            Size.from_B(bytes_to_fill % max_io_size, sector_aligned=True))
        io = core.new_io(
            cache.get_default_queue(),
            ios_to_issue * max_io_size,
            write_data.size,
            IoDir.WRITE,
            0,
            0,
        )
        io.set_data(write_data)
        io.callback = comp.callback
        completions += [comp]
        io.submit()

    for c in completions:
        c.wait()
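fill_cache() is driven by a target occupancy ratio rather than an explicit byte count; code example #26 below calls it as fill_cache(cache, fill_percentage / 100). A hedged usage sketch (assumes a started cache with one core already added, since the helper indexes cache.cores[0]):

# Hypothetical call site: fill roughly half of the cache through its first core,
# then read back the resulting occupancy fraction from the statistics.
fill_cache(cache, 0.5)
occupancy_fraction = cache.get_stats()["usage"]["occupancy"]["fraction"]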
Code example #12
def io_from_exported_object(vol: Volume, queue: Queue, buffer_size: int,
                            offset: int):
    read_buffer = Data(buffer_size)
    io = vol.new_io(queue, offset, read_buffer.size, IoDir.READ, 0, 0)
    io.set_data(read_buffer)

    completion = OcfCompletion([("err", c_int)])
    io.callback = completion.callback
    io.submit()
    completion.wait()

    assert completion.results["err"] == 0, "IO from exported object completion"
    return read_buffer
Code example #13
def io_from_exported_object(exported_obj: Core, buffer_size: int, offset: int):
    read_buffer = Data(buffer_size)
    io = exported_obj.new_io(exported_obj.cache.get_default_queue(), offset,
                             read_buffer.size, IoDir.READ, 0, 0)
    io.set_data(read_buffer)

    completion = OcfCompletion([("err", c_int)])
    io.callback = completion.callback
    io.submit()
    completion.wait()

    assert completion.results["err"] == 0, "IO from exported object completion"
    return read_buffer
Code example #14
File: volume_exp_obj.py Project: Open-CAS/ocf
    def _read(self, offset=0, size=0):
        if size == 0:
            size = self.get_length().B - offset
        exp_obj_io = self.__alloc_io(offset, size, IoDir.READ, 0, 0)
        completion = OcfCompletion([("err", c_int)])
        exp_obj_io.callback = completion.callback
        data = Data.from_bytes(bytes(size))
        exp_obj_io.set_data(data)
        exp_obj_io.submit()
        completion.wait()
        error = completion.results["err"]
        if error:
            raise Exception("error reading exported object for dump")
        return data
Code example #15
def test_secure_erase_simple_io_cleaning():
    """
        Perform simple IO which will trigger WB cleaning. Track all the data from
        the cleaner (locked) and make sure it is erased and unlocked after use.

        1. Start cache in WB mode
        2. Write single sector at LBA 0
        3. Read whole cache line at LBA 0
        4. Assert that 3. triggered cleaning
        5. Check if all locked Data copies were erased and unlocked
    """
    ctx = OcfCtx(
        OcfLib.getInstance(),
        b"Security tests ctx",
        DefaultLogger(LogLevel.WARN),
        DataCopyTracer,
        Cleaner,
    )

    ctx.register_volume_type(RamVolume)

    cache_device = RamVolume(S.from_MiB(50))
    cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WB)

    core_device = RamVolume(S.from_MiB(100))
    core = Core.using_device(core_device)
    cache.add_core(core)
    vol = CoreVolume(core, open=True)
    queue = cache.get_default_queue()

    read_data = Data(S.from_sector(1).B)
    io = vol.new_io(queue,
                    S.from_sector(1).B, read_data.size, IoDir.WRITE, 0, 0)
    io.set_data(read_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    read_data = Data(S.from_sector(8).B)
    io = vol.new_io(queue,
                    S.from_sector(1).B, read_data.size, IoDir.READ, 0, 0)
    io.set_data(read_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    stats = cache.get_stats()

    ctx.exit()

    assert (len(DataCopyTracer.needs_erase) == 0
            ), "Not all locked Data instances were secure erased!"
    assert (len(DataCopyTracer.locked_instances) == 0
            ), "Not all locked Data instances were unlocked!"
    assert (stats["usage"]["clean"]["value"]) > 0, "Cleaner didn't run!"
Code example #16
def io_operation(
    vol: Volume,
    queue: Queue,
    data: Data,
    io_direction: int,
    offset: int = 0,
    io_class: int = 0,
):
    io = vol.new_io(queue, offset, data.size, io_direction, io_class, 0)
    io.set_data(data)

    completion = OcfCompletion([("err", c_int)])
    io.callback = completion.callback
    io.submit()
    completion.wait()
    return completion
Code example #17
def test_partial_hit_promotion(pyocf_ctx):
    """
    Check that the NHIT promotion policy does not prevent partial hits from being
    promoted to cache

    1. Create core/cache pair with promotion policy ALWAYS
    2. Issue one-sector IO to cache to insert partially valid cache line
    3. Set NHIT promotion policy with trigger=0 (always triggered) and high
    insertion threshold
    4. Issue a request containing partially valid cache line and next cache line
        * occupancy should rise - partially hit request should bypass nhit criteria
    """

    # Step 1
    cache_device = Volume(Size.from_MiB(30))
    core_device = Volume(Size.from_MiB(30))

    cache = Cache.start_on_device(cache_device)
    core = Core.using_device(core_device)
    cache.add_core(core)

    # Step 2
    comp = OcfCompletion([("error", c_int)])
    write_data = Data(Size.from_sector(1))
    io = core.new_io(cache.get_default_queue(), 0, write_data.size, IoDir.READ, 0, 0)
    io.set_data(write_data)
    io.callback = comp.callback
    io.submit()

    comp.wait()

    stats = cache.get_stats()
    cache_lines = stats["conf"]["size"]
    assert stats["usage"]["occupancy"]["value"] == 1

    # Step 3
    cache.set_promotion_policy(PromotionPolicy.NHIT)
    cache.set_promotion_policy_param(
        PromotionPolicy.NHIT, NhitParams.TRIGGER_THRESHOLD, 0
    )
    cache.set_promotion_policy_param(
        PromotionPolicy.NHIT, NhitParams.INSERTION_THRESHOLD, 100
    )

    # Step 4
    comp = OcfCompletion([("error", c_int)])
    write_data = Data(2 * cache_lines.line_size)
    io = core.new_io(cache.get_default_queue(), 0, write_data.size, IoDir.WRITE, 0, 0)
    io.set_data(write_data)
    io.callback = comp.callback
    io.submit()
    comp.wait()

    stats = cache.get_stats()
    assert (
        stats["usage"]["occupancy"]["value"] == 2
    ), "Second cache line should be mapped"
Code example #18
def test_start_stop_noqueue(pyocf_ctx):
    # cache object just to construct cfg conveniently
    _cache = Cache(pyocf_ctx.ctx_handle)

    cache_handle = c_void_p()
    status = pyocf_ctx.lib.ocf_mngt_cache_start(
        pyocf_ctx.ctx_handle, byref(cache_handle), byref(_cache.cfg)
    )
    assert not status, "Failed to start cache: {}".format(status)

    # stop without creating mngmt queue
    c = OcfCompletion(
        [("cache", c_void_p), ("priv", c_void_p), ("error", c_int)]
    )
    pyocf_ctx.lib.ocf_mngt_cache_stop(cache_handle, c, None)
    c.wait()
    assert not c.results["error"], "Failed to stop cache: {}".format(c.results["error"])
Code example #19
def test_start_stop_noqueue(pyocf_ctx):
    cfg = CacheConfig()
    pyocf_ctx.lib.ocf_mngt_cache_config_set_default_wrapper(byref(cfg))
    cfg._metadata_volatile = True
    cfg._name = "Test".encode("ascii")

    cache_handle = c_void_p()
    status = pyocf_ctx.lib.ocf_mngt_cache_start(pyocf_ctx.ctx_handle,
                                                byref(cache_handle),
                                                byref(cfg), None)
    assert not status, "Failed to start cache: {}".format(status)

    # stop without creating mngmt queue
    c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p),
                       ("error", c_int)])
    pyocf_ctx.lib.ocf_mngt_cache_stop(cache_handle, c, None)
    c.wait()
    assert not c.results["error"], "Failed to stop cache: {}".format(
        c.results["error"])
Code example #20
File: test_eviction.py Project: Open-CAS/ocf
def send_io(core: Core, data: Data, addr: int = 0, target_ioclass: int = 0):
    vol = core.get_front_volume()
    io = vol.new_io(
        core.cache.get_default_queue(),
        addr,
        data.size,
        IoDir.WRITE,
        target_ioclass,
        0,
    )

    io.set_data(data)

    completion = OcfCompletion([("err", c_int)])
    io.callback = completion.callback
    io.submit()
    completion.wait()

    assert completion.results["err"] == 0, "IO to exported object completion"
Code example #21
def _io(vol, queue, addr, size, direction, context):
    comp = OcfCompletion([("error", c_int)], context=context)
    data = Data(size)

    io = vol.new_io(queue, addr, size, direction, 0, 0)
    io.set_data(data)
    io.callback = comp.callback
    io.submit()

    return comp
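Unlike the synchronous helpers above, this _io() variant returns the completion without waiting, so many IOs can stay in flight and be reaped later, the same pattern used in code examples #23 and #25. A sketch of a possible caller (submit_write_batch is an illustrative name):

# Hypothetical caller: submit a batch of asynchronous writes, then wait for all of them.
def submit_write_batch(vol, queue, block_size=4096, count=100):
    completions = [
        _io(vol, queue, i * block_size, block_size, IoDir.WRITE, context=i)
        for i in range(count)
    ]
    for comp in completions:
        comp.wait()
        assert comp.results["error"] == 0  # no IO should fail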
Code example #22
def _io(core, addr, size, direction, context):
    comp = OcfCompletion([("error", c_int)], context=context)
    data = Data(size)

    io = core.new_io(core.cache.get_default_queue(), addr, size, direction, 0,
                     0)
    io.set_data(data)
    io.callback = comp.callback
    io.submit()

    return comp
Code example #23
def test_secure_erase_simple_io_cleaning():
    """
        Perform simple IO which will trigger WB cleaning. Track all the data from
        the cleaner (locked) and make sure it is erased and unlocked after use.
    """
    ctx = OcfCtx(
        OcfLib.getInstance(),
        b"Security tests ctx",
        DefaultLogger(LogLevel.WARN),
        DataCopyTracer,
        MetadataUpdater,
        Cleaner,
    )

    ctx.register_volume_type(Volume)

    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WB)

    core_device = Volume(S.from_MiB(100))
    core = Core.using_device(core_device)
    cache.add_core(core)

    cmpls = []
    for i in range(10000):
        read_data = Data(S.from_KiB(120))
        io = core.new_io()
        io.set_data(read_data)
        io.configure(
            (i * 1259) % int(core_device.size), read_data.size, IoDir.WRITE, 0, 0
        )
        io.set_queue(cache.get_default_queue())

        cmpl = OcfCompletion([("err", c_int)])
        io.callback = cmpl.callback
        cmpls.append(cmpl)
        io.submit()

    for c in cmpls:
        c.wait()

    stats = cache.get_stats()

    ctx.exit()

    assert (
        len(DataCopyTracer.needs_erase) == 0
    ), "Not all locked Data instances were secure erased!"
    assert (
        len(DataCopyTracer.locked_instances) == 0
    ), "Not all locked Data instances were unlocked!"
    assert (stats["usage"]["clean"]["value"]) > 0, "Cleaner didn't run!"
Code example #24
File: test_pyocf.py Project: Open-CAS/ocf
def test_load_cache_with_cores(pyocf_ctx, open_cores):
    cache_device = RamVolume(S.from_MiB(40))
    core_device = RamVolume(S.from_MiB(40))

    cache = Cache.start_on_device(cache_device)
    core = Core.using_device(core_device, name="test_core")

    cache.add_core(core)
    vol = CoreVolume(core, open=True)

    write_data = Data.from_string("This is test data")
    io = vol.new_io(cache.get_default_queue(),
                    S.from_sector(3).B, write_data.size, IoDir.WRITE, 0, 0)
    io.set_data(write_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    cache.stop()

    cache = Cache.load_from_device(cache_device, open_cores=open_cores)
    if not open_cores:
        cache.add_core(core, try_add=True)
    else:
        core = cache.get_core_by_name("test_core")

    vol = CoreVolume(core, open=True)

    read_data = Data(write_data.size)
    io = vol.new_io(cache.get_default_queue(),
                    S.from_sector(3).B, read_data.size, IoDir.READ, 0, 0)
    io.set_data(read_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    assert read_data.md5() == write_data.md5()
    assert vol.md5() == core_device.md5()
Code example #25
def test_change_to_nhit_and_back_io_in_flight(pyocf_ctx):
    """
    Try switching the promotion policy while IO is in flight; no IOs should return an error

    1. Create core/cache pair with promotion policy ALWAYS
    2. Issue IOs without waiting for completion
    3. Change promotion policy to NHIT
    4. Wait for IO completions
        * no IOs should fail
    5. Issue IOs without waiting for completion
    6. Change promotion policy to ALWAYS
    7. Wait for IO completions
        * no IOs should fail
    """

    # Step 1
    cache_device = Volume(Size.from_MiB(30))
    core_device = Volume(Size.from_MiB(30))

    cache = Cache.start_on_device(cache_device)
    core = Core.using_device(core_device)

    cache.add_core(core)

    # Step 2
    completions = []
    for i in range(2000):
        comp = OcfCompletion([("error", c_int)])
        write_data = Data(4096)
        io = core.new_io(cache.get_default_queue(), i * 4096, write_data.size,
                         IoDir.WRITE, 0, 0)
        completions += [comp]
        io.set_data(write_data)
        io.callback = comp.callback
        io.submit()

    # Step 3
    cache.set_promotion_policy(PromotionPolicy.NHIT)

    # Step 4
    for c in completions:
        c.wait()
        assert not c.results["error"], \
            "No IOs should fail when turning NHIT policy on"

    # Step 5
    completions = []
    for i in range(2000):
        comp = OcfCompletion([("error", c_int)])
        write_data = Data(4096)
        io = core.new_io(cache.get_default_queue(), i * 4096, write_data.size,
                         IoDir.WRITE, 0, 0)
        completions += [comp]
        io.set_data(write_data)
        io.callback = comp.callback
        io.submit()

    # Step 6
    cache.set_promotion_policy(PromotionPolicy.ALWAYS)

    # Step 7
    for c in completions:
        c.wait()
        assert not c.results["error"], \
            "No IOs should fail when turning NHIT policy off"
Code example #26
def test_promoted_after_hits_various_thresholds(pyocf_ctx, insertion_threshold,
                                                fill_percentage):
    """
    Check promotion policy behavior with various set thresholds

    1. Create core/cache pair with promotion policy NHIT
    2. Set TRIGGER_THRESHOLD/INSERTION_THRESHOLD to predefined values
    3. Fill cache from the beginning until occupancy reaches TRIGGER_THRESHOLD%
    4. Issue INSERTION_THRESHOLD - 1 requests to core line not inserted to cache
        * occupancy should not change
    5. Issue one request to LBA from step 4
        * occupancy should rise by one cache line
    """

    # Step 1
    cache_device = Volume(Size.from_MiB(30))
    core_device = Volume(Size.from_MiB(30))

    cache = Cache.start_on_device(cache_device,
                                  promotion_policy=PromotionPolicy.NHIT)
    core = Core.using_device(core_device)
    cache.add_core(core)

    # Step 2
    cache.set_promotion_policy_param(PromotionPolicy.NHIT,
                                     NhitParams.TRIGGER_THRESHOLD,
                                     fill_percentage)
    cache.set_promotion_policy_param(PromotionPolicy.NHIT,
                                     NhitParams.INSERTION_THRESHOLD,
                                     insertion_threshold)
    # Step 3
    fill_cache(cache, fill_percentage / 100)

    stats = cache.get_stats()
    cache_lines = stats["conf"]["size"]
    assert stats["usage"]["occupancy"]["fraction"] // 10 == fill_percentage * 10
    filled_occupancy = stats["usage"]["occupancy"]["value"]

    # Step 4
    last_core_line = int(core_device.size) - cache_lines.line_size
    completions = []
    for i in range(insertion_threshold - 1):
        comp = OcfCompletion([("error", c_int)])
        write_data = Data(cache_lines.line_size)
        io = core.new_io(
            cache.get_default_queue(),
            last_core_line,
            write_data.size,
            IoDir.WRITE,
            0,
            0,
        )
        completions += [comp]
        io.set_data(write_data)
        io.callback = comp.callback
        io.submit()

    for c in completions:
        c.wait()

    stats = cache.get_stats()
    threshold_reached_occupancy = stats["usage"]["occupancy"]["value"]
    assert threshold_reached_occupancy == filled_occupancy, (
        "No insertion should occur while NHIT is triggered and core line "
        "didn't reach INSERTION_THRESHOLD"
    )

    # Step 5
    comp = OcfCompletion([("error", c_int)])
    write_data = Data(cache_lines.line_size)
    io = core.new_io(cache.get_default_queue(), last_core_line,
                     write_data.size, IoDir.WRITE, 0, 0)
    io.set_data(write_data)
    io.callback = comp.callback
    io.submit()

    comp.wait()

    assert (threshold_reached_occupancy ==
            cache.get_stats()["usage"]["occupancy"]["value"] -
            1), "Previous request should be promoted and occupancy should rise"
Code example #27
def test_secure_erase_simple_io_read_misses(cache_mode):
    """
        Perform simple IO which will trigger read misses, which in turn should
        trigger backfill. Track all the data locked/copied for backfill and make
        sure OCF calls secure erase and unlock on them.
    """
    ctx = OcfCtx(
        OcfLib.getInstance(),
        b"Security tests ctx",
        DefaultLogger(LogLevel.WARN),
        DataCopyTracer,
        Cleaner,
    )

    ctx.register_volume_type(RamVolume)

    cache_device = RamVolume(S.from_MiB(50))
    cache = Cache.start_on_device(cache_device, cache_mode=cache_mode)

    core_device = RamVolume(S.from_MiB(50))
    core = Core.using_device(core_device)
    cache.add_core(core)
    vol = CoreVolume(core, open=True)
    queue = cache.get_default_queue()

    write_data = DataCopyTracer(S.from_sector(1))
    io = vol.new_io(
        queue,
        S.from_sector(1).B,
        write_data.size,
        IoDir.WRITE,
        0,
        0,
    )
    io.set_data(write_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    cmpls = []
    for i in range(100):
        read_data = DataCopyTracer(S.from_sector(1))
        io = vol.new_io(
            queue,
            i * S.from_sector(1).B,
            read_data.size,
            IoDir.READ,
            0,
            0,
        )
        io.set_data(read_data)

        cmpl = OcfCompletion([("err", c_int)])
        io.callback = cmpl.callback
        cmpls.append(cmpl)
        io.submit()

    for c in cmpls:
        c.wait()

    write_data = DataCopyTracer.from_string("TEST DATA" * 100)
    io = vol.new_io(queue, S.from_sector(1), write_data.size, IoDir.WRITE, 0,
                    0)
    io.set_data(write_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    stats = cache.get_stats()

    ctx.exit()

    assert (len(DataCopyTracer.needs_erase) == 0
            ), "Not all locked Data instances were secure erased!"
    assert (len(DataCopyTracer.locked_instances) == 0
            ), "Not all locked Data instances were unlocked!"
    assert (stats["req"]["rd_partial_misses"]["value"] +
            stats["req"]["rd_full_misses"]["value"]) > 0