def test_eviction_two_cores(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
    """Verify eviction when cachelines are remapped between two distinct cores."""
    cache_device = Volume(Size.from_MiB(20))
    backend1 = Volume(Size.from_MiB(40))
    backend2 = Volume(Size.from_MiB(40))

    cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
    cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)
    cache_size = cache.get_stats()["conf"]["size"]

    core1 = Core.using_device(backend1, name="core1")
    core2 = Core.using_device(backend2, name="core2")
    cache.add_core(core1)
    cache.add_core(core2)

    # One IO exactly the size of the whole cache, sent to each core in turn
    io_size = Size.from_B(cache_size.B)
    payload = Data(io_size)
    send_io(core1, payload)
    send_io(core2, payload)

    occupancy1 = core1.get_stats()["usage"]["occupancy"]["value"]
    occupancy2 = core2.get_stats()["usage"]["occupancy"]["value"]

    # The IO to the second core should have evicted all of the first core's data
    assert occupancy1 == 0
    assert occupancy2 == io_size.blocks_4k
def test_start_too_small_device(pyocf_ctx, mode, cls):
    """Check that starting a cache on a device below the minimum size is blocked.

    A device one byte short of 20 MiB must make cache start fail with
    OCF_ERR_START_CACHE_FAIL.
    """
    undersized_device = Volume(Size.from_B(20 * 1024 * 1024 - 1))

    with pytest.raises(OcfError, match="OCF_ERR_START_CACHE_FAIL"):
        Cache.start_on_device(undersized_device, cache_mode=mode, cache_line_size=cls)
def test_write_size_greater_than_cache(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
    """Test that no eviction occurs when IO greater than cache size is submitted.

    First write fills half the cache; a second write targeting offsets beyond
    cache capacity must go straight to the core (pass-through) and leave cache
    occupancy unchanged.
    """
    cache_device = Volume(Size.from_MiB(20))
    core_device = Volume(Size.from_MiB(5))
    cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
    cache_size = cache.get_stats()["conf"]["size"]
    core_exported = Core.using_device(core_device)
    cache.add_core(core_exported)
    cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)

    valid_io_size = Size.from_B(cache_size.B // 2)
    test_data = Data(valid_io_size)
    send_io(core_exported, test_data)

    stats = core_exported.cache.get_stats()
    first_usage_sts = stats["usage"]
    pt_writes_first = stats["req"]["wr_pt"]
    assert stats["usage"]["occupancy"]["value"] == (valid_io_size.B / Size.from_KiB(4).B), \
        "Occupancy after first IO"

    # Anything below 5 MiB is a valid size (less than core device size).
    # Writing over cache size (to the offset above first IO) should go
    # directly to core and shouldn't trigger eviction.
    io_size_bigger_than_cache = Size.from_MiB(2)
    io_offset = valid_io_size
    test_data = Data(io_size_bigger_than_cache)
    send_io(core_exported, test_data, io_offset)

    if mode is not CacheMode.WT:
        # Flush first write
        cache.flush()

    stats = core_exported.cache.get_stats()
    second_block_sts = stats["block"]
    second_usage_sts = stats["usage"]
    pt_writes_second = stats["req"]["wr_pt"]

    # Second write shouldn't affect cache and should go directly to core:
    # cache occupancy unchanged, second IO counted as pass-through.
    assert first_usage_sts["occupancy"] == second_usage_sts["occupancy"]
    assert pt_writes_first["value"] == 0
    assert pt_writes_second["value"] == 1
    assert second_block_sts["cache_volume_wr"]["value"] == valid_io_size.blocks_4k
    assert second_block_sts["core_volume_wr"]["value"] == (
        valid_io_size.blocks_4k + io_size_bigger_than_cache.blocks_4k
    )
def test_neg_size_unaligned(pyocf_ctx, c_uint16_randomize):
    """Check that write IO creation is blocked when the IO size is not sector-aligned."""
    vol, queue = prepare_cache_and_core(Size.from_MiB(2))
    data = Data(int(Size.from_B(c_uint16_randomize)))
    is_unaligned = c_uint16_randomize % 512 != 0
    if is_unaligned:
        with pytest.raises(Exception):
            vol.new_io(queue, 0, data.size, IoDir.WRITE, 0, 0)
def test_neg_size_unaligned(pyocf_ctx, c_uint16_randomize):
    """Check that IO creation fails when the write size is not sector-aligned."""
    core = prepare_cache_and_core(Size.from_MiB(2))
    data = Data(int(Size.from_B(c_uint16_randomize)))
    if c_uint16_randomize % 512:
        queue = core.cache.get_default_queue()
        with pytest.raises(Exception, match="Failed to create io!"):
            core.new_io(queue, 0, data.size, IoDir.WRITE, 0, 0)
def fill_cache(cache, fill_ratio):
    """
    Helper to fill cache from LBA 0.
    TODO:
     * make it generic and share across all tests
     * reasonable error handling
    """

    def submit_write(offset, length):
        # Issue one asynchronous write and return its completion handle.
        completion = OcfCompletion([("error", c_int)])
        payload = Data(length)
        io = core.new_io(
            cache.get_default_queue(),
            offset,
            payload.size,
            IoDir.WRITE,
            0,
            0,
        )
        io.set_data(payload)
        io.callback = completion.callback
        io.submit()
        return completion

    cache_lines = cache.get_stats()["conf"]["size"]
    bytes_to_fill = cache_lines.bytes * fill_ratio
    max_io_size = cache.device.get_max_io_size().bytes
    full_io_count = math.floor(bytes_to_fill / max_io_size)
    core = cache.cores[0]

    # Full-sized writes covering the bulk of the requested range
    pending = [submit_write(i * max_io_size, max_io_size) for i in range(full_io_count)]

    # Trailing partial write for any remainder
    leftover = bytes_to_fill % max_io_size
    if leftover:
        pending.append(
            submit_write(
                full_io_count * max_io_size,
                Size.from_B(leftover, sector_aligned=True),
            )
        )

    for completion in pending:
        completion.wait()
class JobSpec:
    """Specification of a single IO job (fio-style parameters).

    NOTE(review): the fields are bare annotated class attributes with defaults,
    which suggests this class is decorated with @dataclass outside this view —
    confirm before relying on generated __init__/__eq__.
    """

    # IO direction/mix for the job
    readwrite: ReadWrite = ReadWrite.READ
    # Seed for randomized offset generation
    randseed: int = 1
    # Percentage of writes in a mixed workload
    rwmixwrite: int = 50
    # Whether random offsets cover the whole range without repetition
    randommap: bool = True
    # Block size of each IO
    bs: Size = Size.from_B(512)
    # Starting offset of the job's IO range
    offset: Size = Size(0)
    # Number of parallel jobs
    njobs: int = 1
    # Queue depth (outstanding IOs)
    qd: int = 1
    # Size of the region the job operates on
    size: Size = Size(0)
    # Total amount of IO to issue
    io_size: Size = Size(0)
    # Target volume the IO is issued against
    target: Volume = None
    # Run for a fixed duration instead of a fixed amount of IO
    time_based: bool = False
    # Duration for time-based runs
    time: timedelta = None
    # Keep going after an IO error
    continue_on_error: bool = False

    def merge(self, other):
        # TODO implement — currently discards self and returns other unchanged.
        return other
def get_length(self):
    # Report the volume length as cls * UINT32_MAX bytes
    # (c_uint32(-1).value wraps to 0xFFFFFFFF).
    # NOTE(review): `cls` is not a parameter of this instance method — it
    # presumably refers to a module-level cache line size in the enclosing
    # file; confirm it is in scope wherever this method is called.
    return Size.from_B((cls * c_uint32(-1).value))
def get_max_io_size(self):
    """Return this volume's maximum IO size, as reported by the OCF library."""
    lib = OcfLib.getInstance()
    max_io_bytes = lib.ocf_volume_get_max_io_size(self.c_vol)
    return Size.from_B(max_io_bytes)
def get_length(self):
    """Return this volume's length in bytes, as reported by the OCF library."""
    lib = OcfLib.getInstance()
    length_bytes = lib.ocf_volume_get_length(self.c_vol)
    return Size.from_B(length_bytes)