import time
import logging

import pytest
import matplotlib.pyplot as plt
import nvme as d

TEST_LOOPS = 10  # loop count for the write/read-to-EOL test below; the exact value is an assumption


def test_swl_only(nvme0: d.Controller, nvme0n1: d.Namespace, verify):
    logging.info("format")
    nvme0n1.format(512)

    io_size = 128
    ns_size = nvme0n1.id_data(7, 0)
    io_count = ns_size//io_size
    logging.info("fill whole drive")
    nvme0n1.ioworker(io_size=io_size,
                     lba_random=False,
                     io_count=io_count,
                     read_percentage=0).start().close()

    io_per_second = []
    logging.info("write hot sequential data")
    # 10GB seq write
    nvme0n1.ioworker(io_size=8,
                     lba_random=False,
                     region_end=10*1024*1024*1024//512,  # 10GB
                     read_percentage=0,
                     time=10*3600,
                     output_io_per_second=io_per_second).start().close()
    logging.info(io_per_second)

    logging.info("verify whole drive")
    nvme0n1.ioworker(io_size=io_size,
                     lba_random=False,
                     io_count=io_count,
                     read_percentage=100).start().close()

    plt.plot(io_per_second)
    plt.ylim(bottom=0)
    plt.xlim(left=0)
    plt.show()
def test_hello_world(nvme0, nvme0n1: d.Namespace):
    read_buf = d.Buffer(512)
    data_buf = d.Buffer(512)
    data_buf[10:21] = b'hello world'
    qpair = d.Qpair(nvme0, 16)  # create IO SQ/CQ pair, with 16 queue-depth
    assert read_buf[10:21] != b'hello world'

    def write_cb(cdw0, status1):  # command callback function
        nvme0n1.read(qpair, read_buf, 0, 1)
    nvme0n1.write(qpair, data_buf, 0, 1, cb=write_cb)

    # wait for both the write and the chained read to complete
    qpair.waitdone(2)
    assert read_buf[10:21] == b'hello world'
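# A callback-free variant of the same exchange, as a minimal sketch. It relies
# on the chained ".waitdone()" style used by the dsm()/compare() examples
# below; whether write()/read() return a waitdone-able object in every pynvme
# version is an assumption, and the name test_hello_world_sync is hypothetical.
def test_hello_world_sync(nvme0, nvme0n1: d.Namespace):
    read_buf = d.Buffer(512)
    data_buf = d.Buffer(512)
    data_buf[10:21] = b'hello world'
    qpair = d.Qpair(nvme0, 16)
    nvme0n1.write(qpair, data_buf, 0, 1).waitdone()  # write LBA 0, 1 block
    nvme0n1.read(qpair, read_buf, 0, 1).waitdone()   # read it back
    assert read_buf[10:21] == b'hello world'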
def test_write_and_read_to_eol(nvme0, subsystem, nvme0n1: d.Namespace, verify):
    assert verify

    # format drive
    nvme0n1.format()
    lba_count = nvme0n1.id_data(7, 0)

    # test for PE cycles
    for i in range(TEST_LOOPS):
        logging.info(f"loop {i} start")

        # write 1 pass of whole drive
        io_size = 64*1024//512  # 64KB
        write_start = time.time()
        nvme0n1.ioworker(io_size, io_size, False, 0,
                         io_count=lba_count//io_size).start().close()
        write_duration = time.time()-write_start
        logging.info("full drive write %d seconds" % write_duration)
        assert write_duration < 1800

        # power cycle
        subsystem.power_cycle(15)

        # read part of drive
        read_time = 1800-write_duration
        nvme0n1.ioworker(io_size, io_size, False, 100, read_time,
                         region_end=lba_count//100).start().close()
        logging.info(f"loop {i} finish")

        # power cycle
        subsystem.power_cycle(15)
def test_dsm_trim(nvme0: d.Controller, nvme0n1: d.Namespace, qpair: d.Qpair):
    trimbuf = d.Buffer(4096)

    # DUT info
    logging.info("model number: %s" % nvme0.id_data(63, 24, str))
    logging.info("firmware revision: %s" % nvme0.id_data(71, 64, str))

    # single range
    start_lba = 0
    lba_count = 8*1024
    trimbuf.set_dsm_range(0, start_lba, lba_count)
    nvme0n1.dsm(qpair, trimbuf, 1, attribute=0x4).waitdone()  # 0x4: deallocate (AD) attribute

    # multiple ranges
    lba_count = lba_count//256
    for i in range(256):
        trimbuf.set_dsm_range(i, start_lba+i*lba_count, lba_count)
    nvme0n1.dsm(qpair, trimbuf, 256).waitdone()
def test_trim_basic(nvme0: d.Controller, nvme0n1: d.Namespace, verify):
    GB = 1024*1024*1024
    all_zero_databuf = d.Buffer(512)
    trimbuf = d.Buffer(4096)
    q = d.Qpair(nvme0, 32)

    # DUT info
    logging.info("model number: %s" % nvme0.id_data(63, 24, str))
    logging.info("firmware revision: %s" % nvme0.id_data(71, 64, str))

    # write
    logging.info("write data in 10G ~ 20G")
    io_size = 128*1024//512
    start_lba = 10*GB//512
    lba_count = 10*GB//512
    nvme0n1.ioworker(io_size=io_size,
                     lba_align=io_size,
                     lba_random=False,
                     read_percentage=0,
                     lba_start=start_lba,
                     io_count=lba_count//io_size,
                     qdepth=128).start().close()

    # verify data after write, data should be modified
    with pytest.warns(UserWarning, match="ERROR status: 02/85"):
        nvme0n1.compare(q, all_zero_databuf, start_lba).waitdone()

    # get the empty trim time
    trimbuf.set_dsm_range(0, 0, 0)
    trim_cmd = nvme0n1.dsm(q, trimbuf, 1).waitdone()  # first call is longer, due to cache?
    start_time = time.time()
    trim_cmd = nvme0n1.dsm(q, trimbuf, 1).waitdone()
    empty_trim_time = time.time()-start_time

    # the trim time on device-side only
    logging.info("trim the 10G data from LBA 0x%lx" % start_lba)
    trimbuf.set_dsm_range(0, start_lba, lba_count)
    start_time = time.time()
    trim_cmd = nvme0n1.dsm(q, trimbuf, 1).waitdone()
    trim_time = time.time()-start_time-empty_trim_time
    logging.info("trim bandwidth: %0.2fGB/s" % (10/trim_time))

    # verify after trim
    nvme0n1.compare(q, all_zero_databuf, start_lba).waitdone()
def test_ioworker_simplified(nvme0, nvme0n1: d.Namespace):
    nvme0n1.ioworker(time=1).start().close()
    nvme0n1.ioworker(io_size=[1, 2, 3, 7, 8, 16], time=1).start().close()
    # opcode mix: 2h Read 10%, 1h Write 20%, 0h Flush 30%, 9h Dataset Management 40%
    nvme0n1.ioworker(op_percentage={2: 10, 1: 20, 0: 30, 9: 40},
                     time=1).start().close()
    # test_hello_world above takes (nvme0, nvme0n1) and creates its own qpair
    test_hello_world(nvme0, nvme0n1)
def test_swl_with_gc(nvme0: d.Controller, nvme0n1: d.Namespace, verify):
    import matplotlib.pyplot as plt

    logging.info("format")
    nvme0n1.format(512)

    io_size = 128
    ns_size = nvme0n1.id_data(7, 0)
    io_count = ns_size//io_size
    logging.info("fill whole drive")
    nvme0n1.ioworker(io_size=io_size,
                     lba_random=False,
                     io_count=io_count,
                     read_percentage=0).start().close()

    # concentrate hot random writes on 10 scattered 1% regions of the LBA space
    distribution = [0]*100
    for i in [0, 3, 11, 28, 60, 71, 73, 88, 92, 98]:
        distribution[i] = 1000

    io_per_second = []
    logging.info("write hot random data")
    r = nvme0n1.ioworker(io_size=8,
                         lba_random=True,
                         distribution=distribution,
                         read_percentage=0,
                         time=10*3600,
                         output_io_per_second=io_per_second).start().close()
    logging.info(io_per_second)
    logging.info(r)

    logging.info("verify whole drive")
    nvme0n1.ioworker(io_size=io_size,
                     lba_random=False,
                     io_count=io_count,
                     read_percentage=100).start().close()

    plt.plot(io_per_second)
    plt.ylim(bottom=0)
    plt.show()
@pytest.fixture()
def nvme0n1(nvme0):
    # override the default namespace fixture: yield a namespace limited to
    # 0x10000 LBA, and close it when the test finishes
    ret = d.Namespace(nvme0, 1, 0x10000)
    yield ret
    ret.close()
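# For reference only: a minimal sketch of the controller fixture that the
# tests above depend on. In practice pynvme's conftest.py supplies the
# nvme0/pcie/subsystem/verify fixtures, so this block is an illustration;
# the constructor arguments and fixture scope are assumptions.
@pytest.fixture(scope="function")
def nvme0(pcie):
    ret = d.Controller(pcie)
    yield ret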