# The pynvme examples below assume these imports (pynvme is imported as d):
import time
import logging
import pytest
import PySimpleGUI as sg
import nvme as d


def test_read_lba_data(nvme0):
    # ask the user which LBA to read, then read it through a new qpair
    lba = int(sg.PopupGetText("Which LBA to read?", "pynvme"))
    q = d.Qpair(nvme0, 10)
    b = d.Buffer(512, "LBA 0x%08x" % lba)
    nvme0n1 = d.Namespace(nvme0)
    nvme0n1.read(q, b, lba).waitdone()
    sg_show_hex_buffer(b)
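# sg_show_hex_buffer() above is defined elsewhere in the pynvme examples; a
# minimal sketch of such a helper, assuming only Buffer's byte indexing and
# PySimpleGUI's PopupScrolled (the dump format here is illustrative):
def sg_show_hex_buffer(buf):
    # render a classic 16-bytes-per-row hex dump and show it in a popup
    lines = []
    for base in range(0, 512, 16):  # the buffer above is 512 bytes
        row = ' '.join('%02x' % buf[i] for i in range(base, base + 16))
        lines.append('%08x  %s' % (base, row))
    sg.PopupScrolled('\n'.join(lines), title='pynvme')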
def test_sanitize(nvme0: d.Controller, buf):
    if nvme0.id_data(331, 328) == 0:
        pytest.skip("sanitize operation is not supported")

    import PySimpleGUI as sg

    logging.info("supported sanitize operation: %d" % nvme0.id_data(331, 328))
    sg.OneLineProgressMeter('sanitize progress', 0, 100,
                            'progress', orientation='h')
    nvme0n1 = d.Namespace(nvme0, 1, 128 * 1000 * 1000 // 4)
    nvme0.sanitize().waitdone()  # sanitize clears namespace

    # poll sanitize status in the Sanitize Status log page (LID 0x81)
    nvme0.getlogpage(0x81, buf, 20).waitdone()
    while buf.data(3, 2) & 0x7 != 1:  # sanitize operation is not completed
        time.sleep(1)
        nvme0.getlogpage(0x81, buf, 20).waitdone()
        progress = buf.data(1, 0) * 100 // 0xffff
        sg.OneLineProgressMeter('sanitize progress', progress, 100,
                                'progress', orientation='h')
        logging.info("%d%%" % progress)

    nvme0n1.close()
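# The Sanitize Status log page (LID 0x81) fields polled above, per the NVMe
# spec: bytes 1:0 are SPROG (progress; completed fraction = value/0xffff) and
# bytes 3:2 are SSTAT, whose bits [2:0] read 2h while a sanitize operation is
# in progress and 1h once the most recent operation completed successfully.
# A small decoder sketch (the helper name is mine, not pynvme API):
def sanitize_completed(buf):
    return (buf.data(3, 2) & 0x7) == 0x1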
def test_quarch_dirty_power_cycle_single(nvme0, poweron=None, poweroff=None):
    region_end = 256*1000*1000  # in LBA; 128GB with 512-byte LBA
    qdepth = min(1024, 1 + (nvme0.cap & 0xffff))

    # get the power cycle count from the SMART log (bytes 115:112)
    def power_cycle_count():
        buf = d.Buffer(4096)
        nvme0.getlogpage(2, buf, 512).waitdone()
        return buf.data(115, 112)

    # run the test one by one
    subsystem = d.Subsystem(nvme0, poweron, poweroff)
    nvme0n1 = d.Namespace(nvme0, 1, region_end)
    assert True == nvme0n1.verify_enable(True)
    orig_cycle_count = power_cycle_count()
    logging.info("power cycle count: %d" % orig_cycle_count)

    # 128K random write
    cmdlog_list = [None]*1000
    with nvme0n1.ioworker(io_size=256,
                          lba_random=True,
                          read_percentage=0,
                          region_end=region_end,
                          time=30,
                          qdepth=qdepth,
                          output_cmdlog_list=cmdlog_list):
        # sudden power loss before the ioworker ends
        time.sleep(10)
        subsystem.poweroff()

    # power on and reset controller
    time.sleep(5)
    subsystem.poweron()
    nvme0.reset()

    # verify data of the latest commands in cmdlog_list
    logging.info(cmdlog_list[-10:])
    read_buf = d.Buffer(256*512)
    qpair = d.Qpair(nvme0, 10)
    for cmd in cmdlog_list:
        slba = cmd[0]
        nlba = cmd[1]
        op = cmd[2]
        if nlba and op == 1:  # write command
            def read_cb(cdw0, status1):
                nonlocal slba
                if status1 >> 1:
                    logging.info("slba 0x%x, status 0x%x" % (slba, status1 >> 1))
            #logging.info("verify slba 0x%x, nlba %d" % (slba, nlba))
            nvme0n1.read(qpair, read_buf, slba, nlba, cb=read_cb).waitdone()
            # re-write to clear CRC mismatch
            nvme0n1.write(qpair, read_buf, slba, nlba, cb=read_cb).waitdone()
    qpair.delete()
    nvme0n1.close()

    # a dirty power cycle must increase the power cycle count by exactly one
    cycle_count = power_cycle_count()
    logging.info("power cycle count: %d" % cycle_count)
    assert cycle_count == orig_cycle_count + 1
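# power_cycle_count() above reads the low 4 bytes of the Power Cycles field
# (SMART/Health log bytes 127:112). The Unsafe Shutdowns counter lives at
# bytes 159:144 of the same log page; a sketch in the same style, assuming
# the low 4 bytes are enough for counting:
def unsafe_shutdown_count(nvme0):
    buf = d.Buffer(4096)
    nvme0.getlogpage(2, buf, 512).waitdone()
    return buf.data(147, 144)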
def test_verify_partial_namespace(nvme0):
    region_end = 1024*1024*1024 // 512  # 1GB space in 512-byte LBA
    nvme0n1 = d.Namespace(nvme0, 1, region_end)
    assert True == nvme0n1.verify_enable(True)

    nvme0n1.ioworker(io_size=8,
                     lba_random=True,
                     region_end=region_end,
                     read_percentage=50,
                     time=30).start().close()
def subprocess_trim(pciaddr, loops):
    nvme0 = d.Controller(pciaddr)
    nvme0n1 = d.Namespace(nvme0)
    q = d.Qpair(nvme0, 8)
    buf = d.Buffer(4096)

    # DSM range 0: deallocate 8 LBAs starting at LBA 8
    buf.set_dsm_range(0, 8, 8)

    # send trim commands
    for i in range(loops):
        nvme0n1.dsm(q, buf, 1).waitdone()
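# subprocess_trim() is written to run in its own process, e.g. to generate
# trim traffic in parallel with the main test process. A possible launcher
# using the standard library (process and loop counts are illustrative, not
# from the original):
def run_trim_subprocess(pciaddr, loops=10000):
    from multiprocessing import Process
    p = Process(target=subprocess_trim, args=(pciaddr, loops))
    p.start()
    p.join()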
def test_two_namespace_ioworkers(nvme0n1, nvme0):
    nvme1 = d.Controller(b'03:00.0')
    nvme1n1 = d.Namespace(nvme1)
    with nvme0n1.ioworker(io_size=8, lba_align=16,
                          lba_random=True, qdepth=16,
                          read_percentage=0, time=100), \
         nvme1n1.ioworker(io_size=8, lba_align=16,
                          lba_random=True, qdepth=16,
                          read_percentage=0, time=100):
        pass
def test_init_nvme_customerized(pcie):
    def nvme_init(nvme0):
        logging.info("user defined nvme init")

        # 2. disable cc.en and wait csts.rdy to 0
        nvme0[0x14] = 0
        while not (nvme0[0x1c] & 0x1) == 0:
            pass

        # 3. set admin queue registers
        nvme0.init_adminq()

        # 4. set register cc: IOCQES=4 (16B CQ entry), IOSQES=6 (64B SQ entry)
        nvme0[0x14] = 0x00460000

        # 5. enable cc.en
        nvme0[0x14] = 0x00460001

        # 6. wait csts.rdy to 1
        while not (nvme0[0x1c] & 0x1) == 1:
            pass

        # 7. identify controller
        nvme0.identify(d.Buffer(4096)).waitdone()

        # 8. create and identify all namespaces
        nvme0.init_ns()

        # 9. set/get num of queues: 2 IO queues
        nvme0.setfeatures(0x7, cdw11=0x00010001).waitdone()
        nvme0.init_queues(nvme0.getfeatures(0x7).waitdone())

        # 10. send out all aer
        aerl = nvme0.id_data(259) + 1
        for i in range(aerl):
            nvme0.aer()

    # 1. set pcie registers
    pcie.aspm = 0

    # create the controller with the user defined init function above
    nvme0 = d.Controller(pcie, nvme_init_func=nvme_init)

    # test with ioworker
    nvme0n1 = d.Namespace(nvme0)
    qpair = d.Qpair(nvme0, 10)
    nvme0n1.ioworker(time=1).start().close()
    qpair2 = d.Qpair(nvme0, 10)

    # only 2 IO queues were allocated in step 9, so a third qpair must fail
    with pytest.raises(d.QpairCreationError):
        qpair3 = d.Qpair(nvme0, 10)

    qpair.delete()
    qpair2.delete()
    nvme0n1.close()
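# Why the third qpair fails above: Set Features FID 0x7 (Number of Queues)
# with cdw11=0x00010001 requests 2 IO submission and 2 IO completion queues
# (both fields are 0-based), so only two qpairs can exist at a time. A decode
# sketch for the dword0 returned by getfeatures (helper name is mine):
def decode_num_queues(cdw0):
    nsqa = (cdw0 & 0xffff) + 1           # IO submission queues allocated
    ncqa = ((cdw0 >> 16) & 0xffff) + 1   # IO completion queues allocated
    return nsqa, ncqa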
def test_two_namespace_ioworkers(nvme0n1, nvme0, verify, tcp):
    nvme1 = d.Controller(tcp)
    nvme1n1 = d.Namespace(nvme1)
    with nvme0n1.ioworker(io_size=8, lba_align=16,
                          lba_random=True, qdepth=16,
                          read_percentage=0, time=1), \
         nvme1n1.ioworker(io_size=8, lba_align=16,
                          lba_random=True, qdepth=16,
                          read_percentage=0, time=1):
        pass
    nvme1n1.close()
def test_nvme_tcp_ioworker(tcp):
    c = d.Controller(tcp)
    n = d.Namespace(c, 1)
    n.ioworker(io_size=8, lba_align=8,
               region_start=0, region_end=0x100,
               lba_random=False, qdepth=4,
               read_percentage=50, time=15).start().close()
    n.close()
def test_multiple_controllers_and_namespaces(pciaddr):
    # address list of the devices to test; overridden by the fixture address
    addr_list = ['01:00.0', '02:00.0', '03:00.0', '04:00.0']
    addr_list = [pciaddr, ]
    test_seconds = 10

    # create all controllers and namespaces
    pcie_list = [d.Pcie(a) for a in addr_list]
    nvme_list = [d.Controller(p) for p in pcie_list]
    ns_list = [d.Namespace(n) for n in nvme_list]

    # create two ioworkers on each namespace
    ioworkers = []
    for ns in ns_list:
        w = ns.ioworker(io_size=8, lba_align=8,
                        region_start=0,
                        region_end=256*1024*8,  # 1GB space
                        lba_random=False,
                        qdepth=64, read_percentage=100,
                        time=test_seconds).start()
        ioworkers.append(w)
        w = ns.ioworker(io_size=8, lba_align=16,
                        region_start=256*1024*8,
                        region_end=2*256*1024*8,
                        lba_random=True,
                        qdepth=256, read_percentage=0,
                        time=test_seconds).start()
        ioworkers.append(w)

    # collect test results
    io_total = 0
    for w in ioworkers:
        r = w.close()
        io_total += (r.io_count_read + r.io_count_nonread)
    logging.info("total throughput: %d IOPS" % (io_total / test_seconds))

    for n in ns_list:
        n.close()
    for p in pcie_list:
        p.close()
def subprocess_trim(pciaddr, seconds):
    pcie = d.Pcie(pciaddr)
    nvme0 = d.Controller(pcie, True)
    nvme0n1 = d.Namespace(nvme0)
    q = d.Qpair(nvme0, 8)
    buf = d.Buffer(4096)

    # DSM range 0: deallocate 8 LBAs starting at LBA 8
    buf.set_dsm_range(0, 8, 8)

    # send trim commands for the given duration
    start = time.time()
    while time.time() - start < seconds:
        nvme0n1.dsm(q, buf, 1).waitdone()

    q.delete()
    nvme0n1.close()
    pcie.close()
def test_read_multiple_devices_50hr(verify):
    assert verify

    # address list of the devices to test
    addr_list = [b'71:00.0', b'72:00.0', b'02:00.0', b'03:00.0']
    test_seconds = 50*3600

    nvme_list = [d.Controller(a) for a in addr_list]
    ns_list = [d.Namespace(n) for n in nvme_list]

    # operations on multiple controllers
    for nvme in nvme_list:
        logging.info("device: %s" % nvme.id_data(63, 24, str))

    logging.info("sequential write to fill the whole namespace")
    ioworkers = {}
    for ns in ns_list:
        lba_max = ns.id_data(7, 0)
        io_size = 128  # 64KB with 512-byte LBA
        a = ns.ioworker(io_size=io_size, lba_random=False,
                        qdepth=16, read_percentage=0,
                        io_count=lba_max//io_size).start()
        ioworkers[ns] = a

    # wait for all ioworkers done
    [ioworkers[ns].close() for ns in ioworkers]

    logging.info("4K read for 50hr")
    ioworkers = {}
    for ns in ns_list:
        a = ns.ioworker(io_size=8, lba_random=True,
                        qdepth=16, read_percentage=100,
                        time=test_seconds).start()
        ioworkers[ns] = a

    # display progress
    for i in range(test_seconds):
        time.sleep(1)
        buf = d.Buffer(512)
        for nvme in nvme_list:
            nvme.getlogpage(2, buf).waitdone()
            logging.info("%9d: %s data units read %d" %
                         (i, nvme.id_data(63, 24, str), buf.data(47, 32)))

    # wait for all ioworkers done
    [ioworkers[ns].close() for ns in ioworkers]
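# The "data units read" logged above comes from SMART log bytes 47:32; per
# the NVMe spec one data unit is 1000 512-byte blocks. A conversion sketch
# (helper name is mine):
def data_units_to_bytes(units):
    return units * 1000 * 512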
def test_spdk_summit_demo(nvme0, nvme0n1):
    logging.info("writing to PCIe SSD and monitoring the temperature")
    nvmt = d.Controller(b'127.0.0.1:4420')
    with nvme0n1.ioworker(io_size=8, lba_align=8,
                          lba_random=False, qdepth=10,
                          read_percentage=33, time=10), \
         nvme0n1.ioworker(io_size=8, lba_align=8,
                          lba_random=False, qdepth=50,
                          read_percentage=67, time=20):
        # read the SMART composite temperature of both controllers
        smart_log = d.Buffer(512, "smart log")
        for i in range(30):
            for n in (nvme0, nvmt):
                n.getlogpage(0x02, smart_log, 512).waitdone()
                ktemp = smart_log.data(2, 1)
                logging.info("temperature %d: %0.2f degreeC" % (i, k2c(ktemp)))
            time.sleep(1)

    test_hello_world(nvmt, d.Namespace(nvmt))
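# k2c() and test_hello_world() above are defined elsewhere in the pynvme
# examples. The SMART composite temperature (bytes 2:1) is reported in
# Kelvin, so the conversion helper is presumably along these lines:
def k2c(ktemp):
    return ktemp - 273.15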
def test_namespace_multiple(buf):
    # create all controllers and namespaces
    addr_list = ['3d:00.0', ]  # add more DUT BDF here
    pcie_list = [d.Pcie(a) for a in addr_list]

    for p in pcie_list:
        nvmex = d.Controller(p)
        qpair = d.Qpair(nvmex, 8)
        nvmexn1 = d.Namespace(nvmex)

        # check if the write uncorrectable command is supported
        wuecc_support = nvmex.id_data(521, 520) & 0x2
        if wuecc_support != 0:
            nvmexn1.write_uncorrectable(qpair, 0, 8).waitdone()
            with pytest.warns(UserWarning, match="ERROR status: 02/81"):
                nvmexn1.read(qpair, buf, 0, 8).waitdone()
            nvmexn1.write(qpair, buf, 0, 8).waitdone()

            def this_read_cb(dword0, status1):
                assert status1 >> 1 == 0
                nvmexn1.write_uncorrectable(qpair, 0, 8)
            nvmexn1.read(qpair, buf, 0, 8, cb=this_read_cb).waitdone(2)

            def another_read_cb(dword0, status1):
                logging.info("dword0: 0x%08x" % dword0)
                logging.info("phase bit: %d" % (status1 & 1))
                logging.info("dnr: %d" % ((status1 >> 15) & 1))
                logging.info("more: %d" % ((status1 >> 14) & 1))
                logging.info("sct: 0x%x" % ((status1 >> 9) & 0x7))
                logging.info("sc: 0x%x" % ((status1 >> 1) & 0xff))
            with pytest.warns(UserWarning, match="ERROR status: 02/81"):
                nvmexn1.read(qpair, buf, 0, 8, cb=another_read_cb).waitdone()

        qpair.delete()
        nvmexn1.close()
        p.close()
@pytest.fixture()
def nvme0n1(nvme0):
    # namespace fixture limited to region_end (defined at module level)
    ret = d.Namespace(nvme0, 1, region_end)
    yield ret
    ret.close()
def test_reset_within_ioworker(nvme0, repeat):
    region_end = 256*1000*1000  # in LBA; 128GB with 512-byte LBA
    qdepth = min(1024, 1 + (nvme0.cap & 0xffff))

    # get the power cycle count from the SMART log (bytes 115:112)
    def power_cycle_count():
        buf = d.Buffer(4096)
        nvme0.getlogpage(2, buf, 512).waitdone()
        return buf.data(115, 112)

    # run the test one by one
    subsystem = d.Subsystem(nvme0)
    nvme0n1 = d.Namespace(nvme0, 1, region_end)
    orig_cycle_count = power_cycle_count()
    logging.info("power cycle count: %d" % orig_cycle_count)

    # 128K random write
    cmdlog_list = [None]*1000
    with nvme0n1.ioworker(io_size=256,
                          lba_random=True,
                          read_percentage=30,
                          region_end=region_end,
                          time=10,
                          qdepth=qdepth,
                          output_cmdlog_list=cmdlog_list):
        # reset the controller before the ioworker ends
        time.sleep(5)
        nvme0.reset()

    # verify data in cmdlog_list
    time.sleep(5)
    assert True == nvme0n1.verify_enable(True)
    logging.info(cmdlog_list[-10:])
    read_buf = d.Buffer(256*512)
    qpair = d.Qpair(nvme0, 10)
    for cmd in cmdlog_list:
        slba = cmd[0]
        nlba = cmd[1]
        op = cmd[2]
        if nlba:
            def read_cb(cdw0, status1):
                nonlocal _slba
                if status1 >> 1:
                    logging.info("slba %d, 0x%x, _slba 0x%x, status 0x%x" %
                                 (slba, slba, _slba, status1 >> 1))
            logging.debug("verify slba %d, nlba %d" % (slba, nlba))

            # read back in 16 pieces
            _nlba = nlba // 16
            for i in range(16):
                _slba = slba + i*_nlba
                nvme0n1.read(qpair, read_buf, _slba, _nlba, cb=read_cb).waitdone()

            # re-write to clear CRC mismatch
            nvme0n1.write(qpair, read_buf, slba, nlba, cb=read_cb).waitdone()
    qpair.delete()
    nvme0n1.close()

    # a controller reset is not a power cycle, so the count must not change
    cycle_count = power_cycle_count()
    logging.info("power cycle count: %d" % cycle_count)
    assert cycle_count == orig_cycle_count
def test_two_namespace_basic(nvme0n1, nvme0, verify, tcp):
    nvme1 = d.Controller(tcp)
    nvme1n1 = d.Namespace(nvme1)
    q1 = d.Qpair(nvme0, 32)
    q2 = d.Qpair(nvme1, 64)
    buf = d.Buffer(512)
    buf1 = d.Buffer(512)
    buf2 = d.Buffer(512)

    nvme0n1.write_zeroes(q1, 11, 1).waitdone()
    nvme0n1.write_zeroes(q1, 22, 1).waitdone()
    nvme1n1.write_zeroes(q2, 11, 1).waitdone()

    logging.info("controller0 namespace size: %d" % nvme0n1.id_data(7, 0))
    logging.info("controller1 namespace size: %d" % nvme1n1.id_data(7, 0))
    assert nvme0n1.id_data(7, 0) != nvme1n1.id_data(7, 0)

    # test nvme0n1
    nvme0n1.read(q1, buf1, 11, 1).waitdone()
    #print(buf1.dump())
    assert buf1[0] == 0
    assert buf1[504] == 0
    nvme0n1.write(q1, buf, 11, 1).waitdone()
    nvme0n1.read(q1, buf1, 11, 1).waitdone()
    #print(buf1.dump())
    assert buf1[0] == 11

    # test nvme1n1
    nvme1n1.read(q2, buf2, 11, 1).waitdone()
    #print(buf2.dump())
    assert buf2[0] == 0
    assert buf2[504] == 0
    nvme1n1.write(q2, buf, 11, 1).waitdone()
    nvme1n1.read(q2, buf2, 11, 1).waitdone()
    #print(buf2.dump())
    assert buf2[0] == 11
    assert buf1[:] != buf2[:]

    # test nvme0n1 again
    nvme0n1.read(q1, buf1, 11, 1).waitdone()
    #print(buf1.dump())
    assert buf1[0] == 11
    nvme0n1.write(q1, buf, 11, 1).waitdone()
    nvme0n1.read(q1, buf1, 11, 1).waitdone()
    #print(buf1.dump())
    assert buf1[0] == 11

    nvme0n1.read(q1, buf1, 22, 1).waitdone()
    #print(buf1.dump())
    assert buf1[0] == 0
    assert buf1[504] == 0
    nvme0n1.write(q1, buf, 22, 1).waitdone()
    nvme0n1.read(q1, buf1, 22, 1).waitdone()
    #print(buf1.dump())
    assert buf1[0] == 22

    # dump the latest 15 commands of both controllers and both qpairs
    nvme0.cmdlog(15)
    nvme1.cmdlog(15)
    q1.cmdlog(15)
    q2.cmdlog(15)

    nvme1n1.close()
    q1.delete()
    q2.delete()
@pytest.fixture()
def nvme0n1(nvme0):
    # skip crc calc in write
    ret = d.Namespace(nvme0, 1, 1)
    yield ret
    ret.close()
@pytest.fixture()
def nvme0n1(nvme0):
    ret = d.Namespace(nvme0, 1)
    yield ret
    ret.close()
    del ret