def set_io_class_info(cache, desc):
    """Push a full IO-class configuration onto *cache*.

    ``desc`` is a sequence of dicts, one per IO class, each carrying the
    ``_name``/``_priority``/``_cache_mode``/``_max_size`` fields; all
    ``IoClassesInfo.MAX_IO_CLASSES`` slots are filled from it and then
    handed to the OCF management call in one shot.
    """
    info = IoClassesInfo()
    for class_id in range(IoClassesInfo.MAX_IO_CLASSES):
        slot = info._config[class_id]  # ctypes struct view: writes land in the array
        entry = desc[class_id]
        slot._class_id = class_id
        slot._name = entry["_name"].encode("utf-8")
        slot._priority = entry["_priority"]
        slot._cache_mode = entry["_cache_mode"]
        slot._max_size = entry["_max_size"]
    OcfLib.getInstance().ocf_mngt_cache_io_classes_configure(cache, byref(info))
def test_secure_erase_simple_io_cleaning():
    """
    Perform simple IO which will trigger WB cleaning. Track all the data from
    cleaner (locked) and make sure they are erased and unlocked after use.

    1. Start cache in WB mode
    2. Write single sector at LBA 0
    3. Read whole cache line at LBA 0
    4. Assert that 3. triggered cleaning
    5. Check if all locked Data copies were erased and unlocked
    """
    # NOTE(review): a later function in this file bears this exact name;
    # pytest collects only the last definition of a duplicated test name --
    # confirm whether one of the two should be renamed or removed.
    ctx = OcfCtx(
        OcfLib.getInstance(),
        b"Security tests ctx",
        DefaultLogger(LogLevel.WARN),
        DataCopyTracer,
        Cleaner,
    )
    ctx.register_volume_type(RamVolume)

    cache_device = RamVolume(S.from_MiB(50))
    cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WB)
    core_device = RamVolume(S.from_MiB(100))
    core = Core.using_device(core_device)
    cache.add_core(core)
    vol = CoreVolume(core, open=True)
    queue = cache.get_default_queue()

    def _submit(direction, byte_count):
        # Fire one IO at the byte address of sector 1 and wait for completion.
        buf = Data(byte_count)
        io = vol.new_io(queue, S.from_sector(1).B, buf.size, direction, 0, 0)
        io.set_data(buf)
        done = OcfCompletion([("err", c_int)])
        io.callback = done.callback
        io.submit()
        done.wait()

    # Single-sector write, then a full cache-line read to force WB cleaning.
    _submit(IoDir.WRITE, S.from_sector(1).B)
    _submit(IoDir.READ, S.from_sector(8).B)

    stats = cache.get_stats()
    ctx.exit()

    assert (
        len(DataCopyTracer.needs_erase) == 0
    ), "Not all locked Data instances were secure erased!"
    assert (
        len(DataCopyTracer.locked_instances) == 0
    ), "Not all locked Data instances were unlocked!"
    assert (stats["usage"]["clean"]["value"]) > 0, "Cleaner didn't run!"
def test_secure_erase_simple_io_cleaning():
    """
    Perform simple IO which will trigger WB cleaning. Track all the data from
    cleaner (locked) and make sure they are erased and unlocked after use.
    """
    # NOTE(review): an earlier function in this file bears this exact name;
    # pytest collects only this later definition -- confirm that shadowing
    # the earlier test is intentional.
    ctx = OcfCtx(
        OcfLib.getInstance(),
        b"Security tests ctx",
        DefaultLogger(LogLevel.WARN),
        DataCopyTracer,
        MetadataUpdater,
        Cleaner,
    )
    ctx.register_volume_type(Volume)

    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WB)
    core_device = Volume(S.from_MiB(100))
    core = Core.using_device(core_device)
    cache.add_core(core)

    # Spray 10k 120 KiB writes over the (smaller) cache to force WB cleaning.
    pending = []
    core_bytes = int(core_device.size)  # loop-invariant: volume size is fixed
    for i in range(10000):
        payload = Data(S.from_KiB(120))
        io = core.new_io()
        io.set_data(payload)
        io.configure((i * 1259) % core_bytes, payload.size, IoDir.WRITE, 0, 0)
        io.set_queue(cache.get_default_queue())
        done = OcfCompletion([("err", c_int)])
        io.callback = done.callback
        pending.append(done)
        io.submit()

    for done in pending:
        done.wait()

    stats = cache.get_stats()
    ctx.exit()

    assert (
        len(DataCopyTracer.needs_erase) == 0
    ), "Not all locked Data instances were secure erased!"
    assert (
        len(DataCopyTracer.locked_instances) == 0
    ), "Not all locked Data instances were unlocked!"
    assert (stats["usage"]["clean"]["value"]) > 0, "Cleaner didn't run!"
def get_cache_by_name(ctx, cache_name):
    """Look up a started cache by name through the OCF management API.

    NOTE(review): the value returned is the C call's result (status code),
    not the cache handle that the call writes into ``cache_pointer`` -- the
    handle is discarded. Presumably callers only check for success/failure;
    confirm against call sites.
    """
    cache_pointer = c_void_p()
    return OcfLib.getInstance().ocf_mngt_cache_get_by_name(
        ctx.ctx_handle, cache_name, byref(cache_pointer)
    )
def test_secure_erase_simple_io_read_misses(cache_mode):
    """
    Perform simple IO which will trigger read misses, which in turn should
    trigger backfill. Track all the data locked/copied for backfill and make
    sure OCF calls secure erase and unlock on them.
    """
    ctx = OcfCtx(
        OcfLib.getInstance(),
        b"Security tests ctx",
        DefaultLogger(LogLevel.WARN),
        DataCopyTracer,
        Cleaner,
    )
    ctx.register_volume_type(RamVolume)

    cache_device = RamVolume(S.from_MiB(50))
    cache = Cache.start_on_device(cache_device, cache_mode=cache_mode)
    core_device = RamVolume(S.from_MiB(50))
    core = Core.using_device(core_device)
    cache.add_core(core)
    vol = CoreVolume(core, open=True)
    queue = cache.get_default_queue()

    # Seed one sector so at least part of the address range is cached.
    write_data = DataCopyTracer(S.from_sector(1))
    io = vol.new_io(
        queue,
        S.from_sector(1).B,
        write_data.size,
        IoDir.WRITE,
        0,
        0,
    )
    io.set_data(write_data)
    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    # 100 single-sector reads across the core; most miss and trigger backfill.
    cmpls = []
    for i in range(100):
        read_data = DataCopyTracer(S.from_sector(1))
        io = vol.new_io(
            queue,
            i * S.from_sector(1).B,
            read_data.size,
            IoDir.READ,
            0,
            0,
        )
        io.set_data(read_data)
        cmpl = OcfCompletion([("err", c_int)])
        io.callback = cmpl.callback
        cmpls.append(cmpl)
        io.submit()

    for c in cmpls:
        c.wait()

    write_data = DataCopyTracer.from_string("TEST DATA" * 100)
    # FIX: the address argument is given in bytes (`.B`) by every other
    # new_io call in this file; the original passed the Size object itself
    # (S.from_sector(1)) here. Same byte offset either way, now consistent.
    io = vol.new_io(queue, S.from_sector(1).B, write_data.size, IoDir.WRITE, 0, 0)
    io.set_data(write_data)
    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    stats = cache.get_stats()
    ctx.exit()

    assert (
        len(DataCopyTracer.needs_erase) == 0
    ), "Not all locked Data instances were secure erased!"
    assert (
        len(DataCopyTracer.locked_instances) == 0
    ), "Not all locked Data instances were unlocked!"
    # At least one read must have been a (partial or full) miss.
    assert (
        stats["req"]["rd_partial_misses"]["value"]
        + stats["req"]["rd_full_misses"]["value"]
    ) > 0