def test_start_write_first_and_check_mode(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
    """Test starting cache in different modes with different cache line sizes.

    After start, check proper cache mode behaviour, starting with a write operation.
    """
    cache_device = Volume(Size.from_MiB(40))
    core_device = Volume(Size.from_MiB(10))
    cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
    core_exported = Core.using_device(core_device)

    cache.add_core(core_exported)

    logger.info("[STAGE] Initial write to exported object")
    cache_device.reset_stats()
    core_device.reset_stats()

    test_data = Data.from_string("This is test data")
    io_to_core(core_exported, test_data, Size.from_sector(1).B)
    check_stats_write_empty(core_exported, mode, cls)

    logger.info("[STAGE] Read from exported object after initial write")
    io_from_exported_object(core_exported, test_data.size, Size.from_sector(1).B)
    check_stats_read_after_write(core_exported, mode, cls, True)

    logger.info("[STAGE] Write to exported object after read")
    cache_device.reset_stats()
    core_device.reset_stats()

    test_data = Data.from_string("Changed test data")
    io_to_core(core_exported, test_data, Size.from_sector(1).B)
    check_stats_write_after_read(core_exported, mode, cls)

    check_md5_sums(core_exported, mode)
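# test_start_write_first_and_check_mode() above (and test_start_read_first_and_check_mode()
# further down) rely on io_to_core() / io_from_exported_object() helpers, plus a family of
# check_stats_* / check_md5_sums() helpers, that are not part of this excerpt. Below is a
# minimal sketch of the two IO helpers for the core-based API used here (core.new_io, as in
# test_simple_wt_write); the read-first test uses volume-based variants that take the volume
# and queue explicitly. The actual helpers in the test module may differ in detail.


def io_to_core(exported_obj: Core, data: Data, offset: int):
    # Submit a write of @data at @offset to the exported object and wait for completion
    io = exported_obj.new_io(
        exported_obj.cache.get_default_queue(), offset, data.size, IoDir.WRITE, 0, 0
    )
    io.set_data(data)

    completion = OcfCompletion([("err", c_int)])
    io.callback = completion.callback
    io.submit()
    completion.wait()

    assert completion.results["err"] == 0, "IO to exported object completion"


def io_from_exported_object(exported_obj: Core, buffer_size: int, offset: int):
    # Read @buffer_size bytes at @offset from the exported object and return the buffer
    read_buffer = Data(buffer_size)
    io = exported_obj.new_io(
        exported_obj.cache.get_default_queue(), offset, read_buffer.size, IoDir.READ, 0, 0
    )
    io.set_data(read_buffer)

    completion = OcfCompletion([("err", c_int)])
    io.callback = completion.callback
    io.submit()
    completion.wait()

    assert completion.results["err"] == 0, "IO from exported object completion"
    return read_buffer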
def test_secure_erase_simple_io_cleaning():
    """
    Perform simple IO which will trigger WB cleaning. Track all the data from
    the cleaner (locked) and make sure it is erased and unlocked after use.

    1. Start cache in WB mode
    2. Write single sector at LBA 0
    3. Read whole cache line at LBA 0
    4. Assert that 3. triggered cleaning
    5. Check if all locked Data copies were erased and unlocked
    """
    ctx = OcfCtx(
        OcfLib.getInstance(),
        b"Security tests ctx",
        DefaultLogger(LogLevel.WARN),
        DataCopyTracer,
        Cleaner,
    )

    ctx.register_volume_type(RamVolume)

    cache_device = RamVolume(S.from_MiB(50))
    cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WB)

    core_device = RamVolume(S.from_MiB(100))
    core = Core.using_device(core_device)
    cache.add_core(core)
    vol = CoreVolume(core, open=True)
    queue = cache.get_default_queue()

    write_data = Data(S.from_sector(1).B)
    io = vol.new_io(queue, S.from_sector(1).B, write_data.size, IoDir.WRITE, 0, 0)
    io.set_data(write_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    read_data = Data(S.from_sector(8).B)
    io = vol.new_io(queue, S.from_sector(1).B, read_data.size, IoDir.READ, 0, 0)
    io.set_data(read_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    stats = cache.get_stats()

    ctx.exit()

    assert (
        len(DataCopyTracer.needs_erase) == 0
    ), "Not all locked Data instances were secure erased!"
    assert (
        len(DataCopyTracer.locked_instances) == 0
    ), "Not all locked Data instances were unlocked!"
    assert (stats["usage"]["clean"]["value"]) > 0, "Cleaner didn't run!"
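# Both secure-erase tests pass DataCopyTracer to OcfCtx as the data implementation so that
# every buffer OCF locks or copies gets tracked, and then assert on its needs_erase /
# locked_instances class attributes. The real class lives in the test module and subclasses
# pyocf's Data; the standalone sketch below only illustrates the bookkeeping it is assumed
# to perform (the actual hook points in the Data ops differ).


class DataCopyTracerSketch:
    # instances locked by OCF that have not been secure-erased yet
    needs_erase = set()
    # instances currently locked by OCF
    locked_instances = set()

    def mlock(self):
        # OCF locked this buffer: remember it until it is erased and unlocked
        type(self).locked_instances.add(self)
        type(self).needs_erase.add(self)

    def secure_erase(self):
        # OCF wiped the buffer contents: it no longer awaits an erase
        type(self).needs_erase.discard(self)

    def munlock(self):
        # OCF released the lock on this buffer
        type(self).locked_instances.discard(self)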
def test_10add_remove_with_io(pyocf_ctx):
    # Start cache device
    cache_device = RamVolume(S.from_MiB(50))
    cache = Cache.start_on_device(cache_device)

    # Create core device
    core_device = RamVolume(S.from_MiB(10))
    core = Core.using_device(core_device)

    # Add and remove core 10 times in a loop with IO in between
    for i in range(0, 10):
        cache.add_core(core)
        vol = CoreVolume(core, open=True)
        stats = cache.get_stats()
        assert stats["conf"]["core_count"] == 1

        write_data = Data.from_string("Test data")
        io = vol.new_io(
            cache.get_default_queue(), S.from_sector(1).B, write_data.size,
            IoDir.WRITE, 0, 0
        )
        io.set_data(write_data)

        cmpl = OcfCompletion([("err", c_int)])
        io.callback = cmpl.callback
        io.submit()
        cmpl.wait()

        cache.remove_core(core)
        stats = cache.get_stats()
        assert stats["conf"]["core_count"] == 0
def test_simple_wt_write(pyocf_ctx):
    cache_device = Volume(S.from_MiB(30))
    core_device = Volume(S.from_MiB(30))

    cache = Cache.start_on_device(cache_device)
    core = Core.using_device(core_device)

    cache.add_core(core)

    cache_device.reset_stats()
    core_device.reset_stats()

    write_data = Data.from_string("This is test data")
    io = core.new_io(
        cache.get_default_queue(), S.from_sector(1).B, write_data.size,
        IoDir.WRITE, 0, 0
    )
    io.set_data(write_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    assert cmpl.results["err"] == 0
    assert cache_device.get_stats()[IoDir.WRITE] == 1
    stats = cache.get_stats()
    assert stats["req"]["wr_full_misses"]["value"] == 1
    assert stats["usage"]["occupancy"]["value"] == 1

    assert core.exp_obj_md5() == core_device.md5()
    cache.stop()
def test_start_read_first_and_check_mode(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
    """Starting cache in different modes with different cache line sizes.

    After start, check proper cache mode behaviour, starting with a read operation.
    """
    cache_device = RamVolume(Size.from_MiB(50))
    core_device = RamVolume(Size.from_MiB(5))
    cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
    core = Core.using_device(core_device)

    cache.add_core(core)
    front_vol = CoreVolume(core, open=True)
    bottom_vol = core.get_volume()
    queue = cache.get_default_queue()

    logger.info("[STAGE] Initial write to core device")
    test_data = Data.from_string("This is test data")
    io_to_core(bottom_vol, queue, test_data, Size.from_sector(1).B)

    cache_device.reset_stats()
    core_device.reset_stats()

    logger.info("[STAGE] Initial read from exported object")
    io_from_exported_object(front_vol, queue, test_data.size, Size.from_sector(1).B)
    check_stats_read_empty(core, mode, cls)

    logger.info("[STAGE] Write to exported object after initial read")
    cache_device.reset_stats()
    core_device.reset_stats()

    test_data = Data.from_string("Changed test data")
    io_to_core(front_vol, queue, test_data, Size.from_sector(1).B)
    check_stats_write_after_read(core, mode, cls, True)

    logger.info("[STAGE] Read from exported object after write")
    io_from_exported_object(front_vol, queue, test_data.size, Size.from_sector(1).B)
    check_stats_read_after_write(core, mode, cls)

    check_md5_sums(core, mode)
def test_partial_hit_promotion(pyocf_ctx):
    """
    Check that the NHIT promotion policy doesn't prevent partial hits from getting
    promoted to cache.

    1. Create core/cache pair with promotion policy ALWAYS
    2. Issue one-sector IO to cache to insert a partially valid cache line
    3. Set NHIT promotion policy with trigger=0 (always triggered) and a high
       insertion threshold
    4. Issue a request covering the partially valid cache line and the next cache line
        * occupancy should rise - the partially hit request should bypass the nhit criteria
    """
    # Step 1
    cache_device = Volume(Size.from_MiB(30))
    core_device = Volume(Size.from_MiB(30))

    cache = Cache.start_on_device(cache_device)
    core = Core.using_device(core_device)
    cache.add_core(core)

    # Step 2
    comp = OcfCompletion([("error", c_int)])
    write_data = Data(Size.from_sector(1))
    io = core.new_io(cache.get_default_queue(), 0, write_data.size, IoDir.READ, 0, 0)
    io.set_data(write_data)
    io.callback = comp.callback
    io.submit()

    comp.wait()
    stats = cache.get_stats()
    cache_lines = stats["conf"]["size"]
    assert stats["usage"]["occupancy"]["value"] == 1

    # Step 3
    cache.set_promotion_policy(PromotionPolicy.NHIT)
    cache.set_promotion_policy_param(
        PromotionPolicy.NHIT, NhitParams.TRIGGER_THRESHOLD, 0
    )
    cache.set_promotion_policy_param(
        PromotionPolicy.NHIT, NhitParams.INSERTION_THRESHOLD, 100
    )

    # Step 4
    comp = OcfCompletion([("error", c_int)])
    write_data = Data(2 * cache_lines.line_size)
    io = core.new_io(cache.get_default_queue(), 0, write_data.size, IoDir.WRITE, 0, 0)
    io.set_data(write_data)
    io.callback = comp.callback
    io.submit()
    comp.wait()

    stats = cache.get_stats()
    assert (
        stats["usage"]["occupancy"]["value"] == 2
    ), "Second cache line should be mapped"
def test_load_cache_with_cores(pyocf_ctx, open_cores):
    cache_device = RamVolume(S.from_MiB(40))
    core_device = RamVolume(S.from_MiB(40))

    cache = Cache.start_on_device(cache_device)
    core = Core.using_device(core_device, name="test_core")

    cache.add_core(core)
    vol = CoreVolume(core, open=True)

    write_data = Data.from_string("This is test data")
    io = vol.new_io(
        cache.get_default_queue(), S.from_sector(3).B, write_data.size,
        IoDir.WRITE, 0, 0
    )
    io.set_data(write_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    cache.stop()

    cache = Cache.load_from_device(cache_device, open_cores=open_cores)
    if not open_cores:
        cache.add_core(core, try_add=True)
    else:
        core = cache.get_core_by_name("test_core")

    vol = CoreVolume(core, open=True)

    read_data = Data(write_data.size)
    io = vol.new_io(
        cache.get_default_queue(), S.from_sector(3).B, read_data.size,
        IoDir.READ, 0, 0
    )
    io.set_data(read_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    assert read_data.md5() == write_data.md5()
    assert vol.md5() == core_device.md5()
def test_simple_wt_write(pyocf_ctx):
    cache_device = RamVolume(S.from_MiB(50))
    core_device = RamVolume(S.from_MiB(50))

    cache = Cache.start_on_device(cache_device)
    core = Core.using_device(core_device)
    queue = cache.get_default_queue()

    cache.add_core(core)
    vol = CoreVolume(core, open=True)

    cache_device.reset_stats()
    core_device.reset_stats()

    r = Rio().target(vol).readwrite(ReadWrite.WRITE).size(S.from_sector(1)).run([queue])

    assert cache_device.get_stats()[IoDir.WRITE] == 1
    cache.settle()
    stats = cache.get_stats()
    assert stats["req"]["wr_full_misses"]["value"] == 1
    assert stats["usage"]["occupancy"]["value"] == 1

    assert vol.md5() == core_device.md5()
    cache.stop()
def test_secure_erase_simple_io_read_misses(cache_mode):
    """
    Perform simple IO which will trigger read misses, which in turn should
    trigger backfill. Track all the data locked/copied for backfill and make
    sure OCF calls secure erase and unlock on them.
    """
    ctx = OcfCtx(
        OcfLib.getInstance(),
        b"Security tests ctx",
        DefaultLogger(LogLevel.WARN),
        DataCopyTracer,
        Cleaner,
    )

    ctx.register_volume_type(RamVolume)

    cache_device = RamVolume(S.from_MiB(50))
    cache = Cache.start_on_device(cache_device, cache_mode=cache_mode)

    core_device = RamVolume(S.from_MiB(50))
    core = Core.using_device(core_device)
    cache.add_core(core)
    vol = CoreVolume(core, open=True)
    queue = cache.get_default_queue()

    write_data = DataCopyTracer(S.from_sector(1))
    io = vol.new_io(
        queue,
        S.from_sector(1).B,
        write_data.size,
        IoDir.WRITE,
        0,
        0,
    )
    io.set_data(write_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    cmpls = []
    for i in range(100):
        read_data = DataCopyTracer(S.from_sector(1))
        io = vol.new_io(
            queue,
            i * S.from_sector(1).B,
            read_data.size,
            IoDir.READ,
            0,
            0,
        )
        io.set_data(read_data)

        cmpl = OcfCompletion([("err", c_int)])
        io.callback = cmpl.callback
        cmpls.append(cmpl)
        io.submit()

    for c in cmpls:
        c.wait()

    write_data = DataCopyTracer.from_string("TEST DATA" * 100)
    io = vol.new_io(queue, S.from_sector(1), write_data.size, IoDir.WRITE, 0, 0)
    io.set_data(write_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    stats = cache.get_stats()

    ctx.exit()

    assert (
        len(DataCopyTracer.needs_erase) == 0
    ), "Not all locked Data instances were secure erased!"
    assert (
        len(DataCopyTracer.locked_instances) == 0
    ), "Not all locked Data instances were unlocked!"
    assert (
        stats["req"]["rd_partial_misses"]["value"]
        + stats["req"]["rd_full_misses"]["value"]
    ) > 0
def test_wo_read_data_consistency(pyocf_ctx):
    # start sector for each region
    region_start = [0, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
    # possible start sectors for test iteration
    start_sec = [0, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
    # possible end sectors for test iteration
    end_sec = [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 23]

    CACHELINE_COUNT = 3
    CACHELINE_SIZE = 4096
    SECTOR_SIZE = Size.from_sector(1).B
    CLS = CACHELINE_SIZE // SECTOR_SIZE
    WORKSET_SIZE = CACHELINE_COUNT * CACHELINE_SIZE
    WORKSET_OFFSET = 1024 * CACHELINE_SIZE
    SECTOR_COUNT = int(WORKSET_SIZE / SECTOR_SIZE)
    ITERATION_COUNT = 200

    # fixed test cases
    fixed_combinations = [
        [I, I, D, D, D, D, D, D, D, D, I, I],
        [I, I, C, C, C, C, C, C, C, C, I, I],
        [I, I, D, D, D, I, D, D, D, D, I, I],
        [I, I, D, D, D, I, I, D, D, D, I, I],
        [I, I, I, I, D, I, I, D, C, D, I, I],
        [I, D, D, D, D, D, D, D, D, D, D, I],
        [C, C, I, D, D, I, D, D, D, D, D, I],
        [D, D, D, D, D, D, D, D, D, D, D, I],
    ]

    data = {}
    # memset n-th sector of core data with n
    data[SectorStatus.INVALID] = bytes(
        [x // SECTOR_SIZE for x in range(WORKSET_SIZE)]
    )
    # memset n-th sector of clean data with n + 100
    data[SectorStatus.CLEAN] = bytes(
        [100 + x // SECTOR_SIZE for x in range(WORKSET_SIZE)]
    )
    # memset n-th sector of dirty data with n + 200
    data[SectorStatus.DIRTY] = bytes(
        [200 + x // SECTOR_SIZE for x in range(WORKSET_SIZE)]
    )

    result_b = bytes(WORKSET_SIZE)

    cache_device = Volume(Size.from_MiB(30))
    core_device = Volume(Size.from_MiB(30))

    cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WO)
    core = Core.using_device(core_device)

    cache.add_core(core)

    insert_order = [x for x in range(CACHELINE_COUNT)]

    # generate region status combinations and shuffle them
    combinations = []
    state_combinations = product(SectorStatus, repeat=len(region_start))
    for S in state_combinations:
        combinations.append(S)
    random.shuffle(combinations)

    # add fixed test cases at the beginning
    combinations = fixed_combinations + combinations

    for S in combinations[:ITERATION_COUNT]:
        # write data to core and invalidate all CL
        cache.change_cache_mode(cache_mode=CacheMode.PT)
        io_to_exp_obj(
            core, WORKSET_OFFSET, len(data[SectorStatus.INVALID]),
            data[SectorStatus.INVALID], 0, IoDir.WRITE
        )

        # randomize cacheline insertion order to exercise different paths
        # with regard to the contiguity of cache I/O physical addresses
        random.shuffle(insert_order)
        sectors = [
            insert_order[i // CLS] * CLS + (i % CLS) for i in range(SECTOR_COUNT)
        ]

        # insert clean sectors - iterate over cachelines in @insert_order order
        cache.change_cache_mode(cache_mode=CacheMode.WT)
        for sec in sectors:
            region = sector_to_region(sec, region_start)
            if S[region] != SectorStatus.INVALID:
                io_to_exp_obj(
                    core, WORKSET_OFFSET + SECTOR_SIZE * sec, SECTOR_SIZE,
                    data[SectorStatus.CLEAN], sec * SECTOR_SIZE, IoDir.WRITE
                )

        # write dirty sectors
        cache.change_cache_mode(cache_mode=CacheMode.WO)
        for sec in sectors:
            region = sector_to_region(sec, region_start)
            if S[region] == SectorStatus.DIRTY:
                io_to_exp_obj(
                    core, WORKSET_OFFSET + SECTOR_SIZE * sec, SECTOR_SIZE,
                    data[SectorStatus.DIRTY], sec * SECTOR_SIZE, IoDir.WRITE
                )

        core_device.reset_stats()

        for s in start_sec:
            for e in end_sec:
                if s > e:
                    continue

                # issue WO read
                START = s * SECTOR_SIZE
                END = e * SECTOR_SIZE
                size = (e - s + 1) * SECTOR_SIZE
                assert 0 == io_to_exp_obj(
                    core, WORKSET_OFFSET + START, size, result_b, START, IoDir.READ
                ), "error reading in WO mode: S={}, start={}, end={}, insert_order={}".format(
                    S, s, e, insert_order
                )

                # verify read data
                for sec in range(s, e + 1):
                    # just check the first byte of the sector
                    region = sector_to_region(sec, region_start)
                    check_byte = sec * SECTOR_SIZE
                    assert (
                        result_b[check_byte] == data[S[region]][check_byte]
                    ), "unexpected data in sector {}, S={}, s={}, e={}, insert_order={}\n".format(
                        sec, S, s, e, insert_order
                    )

                # WO is not supposed to clean dirty data
                assert (
                    core_device.get_stats()[IoDir.WRITE] == 0
                ), "unexpected write to core device, S={}, s={}, e={}, insert_order={}\n".format(
                    S, s, e, insert_order
                )
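# The WO consistency test above (and test_read_data_consistency() below) map a sector
# number to the index of the region it falls into via sector_to_region(), which is
# defined elsewhere in the test module. A minimal sketch, assuming @region_start is an
# ascending list of the first sector of each region:


def sector_to_region(sector, region_start):
    # return the index of the last region whose start sector is <= @sector
    i = 0
    while i < len(region_start) - 1 and sector >= region_start[i + 1]:
        i += 1
    return i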
def test_seq_cutoff_max_streams(pyocf_ctx):
    """
    Test number of sequential streams tracked by OCF.

    MAX_STREAMS is the maximum number of streams OCF is able to track.

    1. Issue MAX_STREAMS requests (writes or reads) to cache, each 1 sector shorter
       than the seq cutoff threshold
    2. Issue MAX_STREAMS-1 requests continuing the streams from 1. to surpass the
       threshold and check if cutoff was triggered (requests used the PT engine)
    3. Issue a single request to a stream not used in 1. or 2. and check if it's been
       handled by cache
    4. Issue a single request to the stream least recently used in 1. and 2. and check
       if it's been handled by cache. It should no longer be tracked by OCF, because
       the request in step 3. overflowed the OCF tracking structure.
    """
    MAX_STREAMS = 256
    TEST_STREAMS = MAX_STREAMS + 1  # Number of streams used by test - one more than OCF can track
    core_size = Size.from_MiB(200)
    threshold = Size.from_KiB(4)

    streams = [
        Stream(
            last=Size((stream_no * int(core_size) // TEST_STREAMS), sector_aligned=True),
            length=Size(0),
            direction=choice(list(IoDir)),
        )
        for stream_no in range(TEST_STREAMS)
    ]  # Generate MAX_STREAMS + 1 non-overlapping streams

    # Remove one stream - this is the one we are going to use to overflow OCF tracking
    # structure in step 3
    non_active_stream = choice(streams)
    streams.remove(non_active_stream)

    cache = Cache.start_on_device(Volume(Size.from_MiB(200)), cache_mode=CacheMode.WT)
    core = Core.using_device(Volume(core_size))

    cache.add_core(core)
    cache.set_seq_cut_off_policy(SeqCutOffPolicy.ALWAYS)
    cache.set_seq_cut_off_threshold(threshold)

    # STEP 1
    shuffle(streams)
    io_size = threshold - Size.from_sector(1)
    io_to_streams(core, streams, io_size)

    stats = cache.get_stats()
    assert (
        stats["req"]["serviced"]["value"] == stats["req"]["total"]["value"] == len(streams)
    ), "All requests should be serviced - no cutoff"

    old_serviced = len(streams)

    # STEP 2
    lru_stream = streams[0]
    streams.remove(lru_stream)

    shuffle(streams)
    io_to_streams(core, streams, Size.from_sector(1))

    stats = cache.get_stats()
    assert (
        stats["req"]["serviced"]["value"] == old_serviced
    ), "Serviced requests stat should not increase - cutoff engaged for all"
    assert (
        stats["req"]["wr_pt"]["value"] + stats["req"]["rd_pt"]["value"] == len(streams)
    ), "All streams should be handled in PT - cutoff engaged for all streams"

    # STEP 3
    io_to_streams(core, [non_active_stream], Size.from_sector(1))

    stats = cache.get_stats()
    assert (
        stats["req"]["serviced"]["value"] == old_serviced + 1
    ), "This request should be serviced by cache - no cutoff for inactive stream"

    # STEP 4
    io_to_streams(core, [lru_stream], Size.from_sector(1))

    stats = cache.get_stats()
    assert (
        stats["req"]["serviced"]["value"] == old_serviced + 2
    ), "This request should be serviced by cache - lru_stream should be no longer tracked"
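# test_seq_cutoff_max_streams() above drives IO through Stream objects and an
# io_to_streams() helper defined elsewhere in the test module. The sketch below shows
# the assumed behaviour: each stream remembers where its last request ended, and every
# new request continues from that address so OCF classifies the stream as sequential.
# Names and signatures here are assumptions derived from how the test uses them.


class Stream:
    def __init__(self, last, length, direction):
        self.last = last            # address right after the last IO issued to this stream
        self.length = length        # total bytes issued to this stream so far
        self.direction = direction  # IoDir.READ or IoDir.WRITE


def io_to_streams(core, streams, io_size):
    # issue a single request of @io_size to every stream, continuing at stream.last
    for stream in streams:
        io = core.new_io(
            core.cache.get_default_queue(), int(stream.last), int(io_size),
            stream.direction, 0, 0
        )
        data = Data(int(io_size))
        io.set_data(data)

        completion = OcfCompletion([("err", c_int)])
        io.callback = completion.callback
        io.submit()
        completion.wait()

        assert completion.results["err"] == 0

        # advance the stream so the next request continues right after this one
        stream.last = Size(int(stream.last) + int(io_size))
        stream.length = Size(int(stream.length) + int(io_size))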
def test_read_data_consistency(pyocf_ctx, cacheline_size, cache_mode, rand_seed):
    CACHELINE_COUNT = 9
    SECTOR_SIZE = Size.from_sector(1).B
    CLS = cacheline_size // SECTOR_SIZE
    WORKSET_SIZE = CACHELINE_COUNT * cacheline_size
    WORKSET_OFFSET = 128 * cacheline_size
    SECTOR_COUNT = int(WORKSET_SIZE / SECTOR_SIZE)
    ITERATION_COUNT = 50

    random.seed(rand_seed)

    # start sector for each region
    region_start = (
        [0, 3 * CLS, 4 * CLS - 1]
        + [4 * CLS + i for i in range(CLS)]
        + [5 * CLS, 5 * CLS + 1, 6 * CLS]
    )
    num_regions = len(region_start)

    # possible IO start sectors for test iteration
    start_sec = [0, CLS, 2 * CLS, 3 * CLS, 4 * CLS - 2, 4 * CLS - 1] + [
        4 * CLS + i for i in range(CLS)
    ]

    # possible IO end sectors for test iteration
    end_sec = (
        [3 * CLS - 1]
        + [4 * CLS + i for i in range(CLS)]
        + [5 * CLS, 5 * CLS + 1, 6 * CLS - 1, 7 * CLS - 1, 8 * CLS - 1, 9 * CLS - 1]
    )

    data = {}
    # memset n-th sector of core data with n << 2
    data[SectorStatus.INVALID] = bytes(
        [get_byte(((x // SECTOR_SIZE) << 2) + 0, x % 4) for x in range(WORKSET_SIZE)]
    )
    # memset n-th sector of clean data with (n << 2) + 1
    data[SectorStatus.CLEAN] = bytes(
        [get_byte(((x // SECTOR_SIZE) << 2) + 1, x % 4) for x in range(WORKSET_SIZE)]
    )
    # memset n-th sector of dirty data with (n << 2) + 2
    data[SectorStatus.DIRTY] = bytes(
        [get_byte(((x // SECTOR_SIZE) << 2) + 2, x % 4) for x in range(WORKSET_SIZE)]
    )

    result_b = bytes(WORKSET_SIZE)

    cache_device = Volume(Size.from_MiB(30))
    core_device = Volume(Size.from_MiB(30))

    cache = Cache.start_on_device(
        cache_device, cache_mode=CacheMode.WO, cache_line_size=cacheline_size
    )
    core = Core.using_device(core_device)

    cache.add_core(core)

    insert_order = list(range(CACHELINE_COUNT))

    # set fixed generated sector statuses
    region_statuses = [
        [I, I, I] + [I for i in range(CLS)] + [I, I, I],
        [I, I, I] + [D for i in range(CLS)] + [I, I, I],
        [I, I, I] + [C for i in range(CLS)] + [I, I, I],
        [I, I, I]
        + [D for i in range(CLS // 2 - 1)]
        + [I]
        + [D for i in range(CLS // 2)]
        + [I, I, I],
        [I, I, I]
        + [D for i in range(CLS // 2 - 1)]
        + [I, I]
        + [D for i in range(CLS // 2 - 1)]
        + [I, I, I],
        [I, I, I]
        + [D for i in range(CLS // 2 - 2)]
        + [I, I, D, C]
        + [D for i in range(CLS // 2 - 2)]
        + [I, I, I],
        [I, I, D] + [D for i in range(CLS)] + [D, I, I],
        [I, I, D]
        + [D for i in range(CLS // 2 - 1)]
        + [I]
        + [D for i in range(CLS // 2)]
        + [D, I, I],
    ]

    # add randomly generated sector statuses
    for _ in range(ITERATION_COUNT - len(region_statuses)):
        region_statuses.append(
            [random.choice(list(SectorStatus)) for _ in range(num_regions)]
        )

    # iterate over generated status combinations and perform the test
    for region_state in region_statuses:
        # invalidate all CL and write the core data pattern to the core device
        cache.change_cache_mode(cache_mode=CacheMode.PT)
        io_to_exp_obj(
            core,
            WORKSET_OFFSET,
            len(data[SectorStatus.INVALID]),
            data[SectorStatus.INVALID],
            0,
            IoDir.WRITE,
        )

        # randomize cacheline insertion order to exercise different paths
        # with regard to the contiguity of cache I/O physical addresses
        random.shuffle(insert_order)
        sectors = [
            insert_order[i // CLS] * CLS + (i % CLS) for i in range(SECTOR_COUNT)
        ]

        # insert clean sectors - iterate over cachelines in @insert_order order
        cache.change_cache_mode(cache_mode=CacheMode.WT)
        for sec in sectors:
            region = sector_to_region(sec, region_start)
            if region_state[region] != SectorStatus.INVALID:
                io_to_exp_obj(
                    core,
                    WORKSET_OFFSET + SECTOR_SIZE * sec,
                    SECTOR_SIZE,
                    data[SectorStatus.CLEAN],
                    sec * SECTOR_SIZE,
                    IoDir.WRITE,
                )

        # write dirty sectors
        cache.change_cache_mode(cache_mode=CacheMode.WB)
        for sec in sectors:
            region = sector_to_region(sec, region_start)
            if region_state[region] == SectorStatus.DIRTY:
                io_to_exp_obj(
                    core,
                    WORKSET_OFFSET + SECTOR_SIZE * sec,
                    SECTOR_SIZE,
                    data[SectorStatus.DIRTY],
                    sec * SECTOR_SIZE,
                    IoDir.WRITE,
                )

        cache.change_cache_mode(cache_mode=cache_mode)

        core_device.reset_stats()

        # get up to 32 randomly selected pairs of (start, end) sectors
        # 32 is enough to cover all combinations for 4K and 8K cacheline size
        io_ranges = [(s, e) for s, e in product(start_sec, end_sec) if s < e]
        random.shuffle(io_ranges)
        io_ranges = io_ranges[:32]

        # run the test for each selected IO range for the currently set up region status
        for start, end in io_ranges:
            print_test_case(region_start, region_state, start, end, SECTOR_COUNT, CLS)

            # issue read
            START = start * SECTOR_SIZE
            END = end * SECTOR_SIZE
            size = (end - start + 1) * SECTOR_SIZE
            assert 0 == io_to_exp_obj(
                core, WORKSET_OFFSET + START, size, result_b, START, IoDir.READ
            ), "error reading in {}: region_state={}, start={}, end={}, insert_order={}".format(
                cache_mode, region_state, start, end, insert_order
            )

            # verify read data
            for sec in range(start, end + 1):
                # just check the first 32 bits of the sector (the size of the fill pattern)
                region = sector_to_region(sec, region_start)
                start_byte = sec * SECTOR_SIZE
                expected_data = bytes_to_uint32(
                    data[region_state[region]][start_byte + 0],
                    data[region_state[region]][start_byte + 1],
                    data[region_state[region]][start_byte + 2],
                    data[region_state[region]][start_byte + 3],
                )
                actual_data = bytes_to_uint32(
                    result_b[start_byte + 0],
                    result_b[start_byte + 1],
                    result_b[start_byte + 2],
                    result_b[start_byte + 3],
                )

                assert (
                    actual_data == expected_data
                ), "unexpected data in sector {}, region_state={}, start={}, end={}, insert_order={}\n".format(
                    sec, region_state, start, end, insert_order
                )

            if cache_mode == CacheMode.WO:
                # WO is not supposed to clean dirty data
                assert (
                    core_device.get_stats()[IoDir.WRITE] == 0
                ), "unexpected write to core device, region_state={}, start={}, end={}, insert_order={}\n".format(
                    region_state, start, end, insert_order
                )
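# Both data-consistency tests talk to the exported object through io_to_exp_obj(), which is
# defined elsewhere in the test module. The assumed contract: submit a read or write of
# @size bytes at @address, copying the payload from/to the Python bytes buffer @data
# starting at @offset, and return the completion error code (0 on success). The sketch
# below follows that contract for the core-based API used above; the ctypes copy-back for
# reads mirrors how the immutable bytes result buffer (result_b) would get filled in, but
# the details of the real helper may differ.

from ctypes import c_void_p, cast, memmove


def io_to_exp_obj(core, address, size, data, offset, direction):
    io = core.new_io(core.cache.get_default_queue(), address, size, direction, 0, 0)

    if direction == IoDir.READ:
        io_data = Data(size)
    else:
        # copy @size bytes of the caller's buffer, starting at @offset, into OCF data
        io_data = Data.from_bytes(data[offset:offset + size])
    io.set_data(io_data)

    completion = OcfCompletion([("err", c_int)])
    io.callback = completion.callback
    io.submit()
    completion.wait()

    if direction == IoDir.READ and completion.results["err"] == 0:
        # copy the read payload back into the caller's buffer at @offset
        memmove(cast(data, c_void_p).value + offset, io_data.handle, size)

    return completion.results["err"]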