def test_init_nvme_customerized(pcie):
    def nvme_init(nvme0):
        logging.info("user defined nvme init")

        nvme0[0x14] = 0
        while not (nvme0[0x1c] & 0x1) == 0: pass

        # 3. set admin queue registers
        nvme0.init_adminq()

        # 4. set register cc
        nvme0[0x14] = 0x00460000

        # 5. enable cc.en
        nvme0[0x14] = 0x00460001

        # 6. wait csts.rdy to 1
        while not (nvme0[0x1c] & 0x1) == 1: pass

        # 7. identify controller
        nvme0.identify(d.Buffer(4096)).waitdone()

        # 8. create and identify all namespace
        nvme0.init_ns()

        # 9. set/get num of queues, 2 IO queues
        nvme0.setfeatures(0x7, cdw11=0x00010001).waitdone()
        nvme0.init_queues(nvme0.getfeatures(0x7).waitdone())

        # 10. send out all aer
        aerl = nvme0.id_data(259) + 1
        for i in range(aerl):
            nvme0.aer()

    # 1. set pcie registers
    pcie.aspm = 0

    # 2. disable cc.en and wait csts.rdy to 0
    nvme0 = d.Controller(pcie, nvme_init_func=nvme_init)

    # test with ioworker
    nvme0n1 = d.Namespace(nvme0)
    qpair = d.Qpair(nvme0, 10)
    nvme0n1.ioworker(time=1).start().close()
    qpair2 = d.Qpair(nvme0, 10)
    with pytest.raises(d.QpairCreationError):
        qpair3 = d.Qpair(nvme0, 10)
    qpair.delete()
    qpair2.delete()
    nvme0n1.close()

def test_powercycle_with_qpair(nvme0, nvme0n1, buf, subsystem):
    qpair = d.Qpair(nvme0, 16)
    nvme0n1.read(qpair, buf, 0).waitdone()

    # delete qpair before power cycle, and then reset controller, recreate qpair
    qpair.delete()
    subsystem.power_cycle(10)
    nvme0.reset()

    qpair = d.Qpair(nvme0, 16)
    nvme0n1.read(qpair, buf, 0).waitdone()
    qpair.delete()

def test_reset_time(pcie):
    def nvme_init(nvme0):
        logging.info("user defined nvme init")

        nvme0[0x14] = 0
        while not (nvme0[0x1c] & 0x1) == 0: pass
        logging.info(time.time())

        # 3. set admin queue registers
        nvme0.init_adminq()
        logging.info(time.time())

        # 5. enable cc.en
        nvme0[0x14] = 0x00460001

        # 6. wait csts.rdy to 1
        while not (nvme0[0x1c] & 0x1) == 1: pass
        logging.info(time.time())

        # 7. identify controller
        nvme0.identify(d.Buffer(4096)).waitdone()
        logging.info(time.time())

        nvme0.setfeatures(0x7, cdw11=0x00ff00ff).waitdone()
        nvme0.init_queues(nvme0.getfeatures(0x7).waitdone())

    logging.info("1: nvme init")
    logging.info(time.time())
    nvme0 = d.Controller(pcie, nvme_init_func=nvme_init)
    subsystem = d.Subsystem(nvme0)

    qpair = d.Qpair(nvme0, 10)
    qpair2 = d.Qpair(nvme0, 10)
    qpair3 = d.Qpair(nvme0, 10)
    qpair.delete()
    qpair2.delete()
    qpair3.delete()

    logging.info("2: nvme reset")
    logging.info(time.time())
    nvme0.reset()

    logging.info("3: power cycle")
    subsystem.poweroff()
    logging.info(time.time())
    subsystem.poweron()
    nvme0.reset()

def test_read_lba_data(nvme0, nvme0n1):
    lba = sg.PopupGetText("Which LBA to read?", "pynvme")
    lba = int(lba, 0)  # convert to number
    q = d.Qpair(nvme0, 10)
    b = d.Buffer(512, "LBA 0x%08x" % lba)
    nvme0n1.read(q, b, lba).waitdone()
    sg_show_hex_buffer(b)

def do_power_cycle(dirty, subsystem, nvme0n1, nvme0):
    if not dirty:
        # notify drive for a clean shutdown
        start_time = time.time()
        subsystem.shutdown_notify()
        logging.info("notify time %.6f sec" % (time.time()-start_time))

    # boot again
    csv_start = time.time()
    start_time = time.time()
    subsystem.power_cycle(10)
    nvme0.reset()
    logging.info("init time %.6f sec" % (time.time()-start_time-10))

    # first read time
    start_time = time.time()
    q = d.Qpair(nvme0, 16)
    b = d.Buffer(512)
    lba = nvme0n1.id_data(7, 0) - 1
    nvme0n1.read(q, b, lba).waitdone()
    logging.info("media ready time %.6f sec" % (time.time()-start_time))
    q.delete()

    # report to csv
    ready_time = time.time()-csv_start-10
    with open("report.csv", "a") as f:
        f.write('%.6f\n' % ready_time)

def test_read_lba_data(nvme0):
    lba = int(sg.PopupGetText("Which LBA to read?", "pynvme"))
    q = d.Qpair(nvme0, 10)
    b = d.Buffer(512, "LBA 0x%08x" % lba)
    nvme0n1 = d.Namespace(nvme0)
    nvme0n1.read(q, b, lba).waitdone()
    sg_show_hex_buffer(b)

def test_quarch_dirty_power_cycle_single(nvme0, poweron=None, poweroff=None):
    region_end = 256*1000*1000  # 1GB
    qdepth = min(1024, 1+(nvme0.cap&0xffff))

    # get the unsafe shutdown count
    def power_cycle_count():
        buf = d.Buffer(4096)
        nvme0.getlogpage(2, buf, 512).waitdone()
        return buf.data(115, 112)

    # run the test one by one
    subsystem = d.Subsystem(nvme0, poweron, poweroff)
    nvme0n1 = d.Namespace(nvme0, 1, region_end)
    assert True == nvme0n1.verify_enable(True)
    orig_unsafe_count = power_cycle_count()
    logging.info("power cycle count: %d" % orig_unsafe_count)

    # 128K random write
    cmdlog_list = [None]*1000
    with nvme0n1.ioworker(io_size=256,
                          lba_random=True,
                          read_percentage=0,
                          region_end=256*1000*1000,
                          time=30,
                          qdepth=qdepth,
                          output_cmdlog_list=cmdlog_list):
        # sudden power loss before the ioworker end
        time.sleep(10)
        subsystem.poweroff()

    # power on and reset controller
    time.sleep(5)
    subsystem.poweron()
    nvme0.reset()

    # verify data in cmdlog_list
    logging.info(cmdlog_list[-10:])
    read_buf = d.Buffer(256*512)
    qpair = d.Qpair(nvme0, 10)
    for cmd in cmdlog_list:
        slba = cmd[0]
        nlba = cmd[1]
        op = cmd[2]
        if nlba and op==1:
            def read_cb(cdw0, status1):
                nonlocal slba
                if status1>>1:
                    logging.info("slba 0x%x, status 0x%x" % (slba, status1>>1))
            #logging.info("verify slba 0x%x, nlba %d" % (slba, nlba))
            nvme0n1.read(qpair, read_buf, slba, nlba, cb=read_cb).waitdone()

            # re-write to clear CRC mismatch
            nvme0n1.write(qpair, read_buf, slba, nlba, cb=read_cb).waitdone()
    qpair.delete()
    nvme0n1.close()

    # verify unsafe shutdown count
    unsafe_count = power_cycle_count()
    logging.info("power cycle count: %d" % unsafe_count)
    assert unsafe_count == orig_unsafe_count+1

def test_trim_time_all_range_buffer(nvme0, nvme0n1, repeat, io_size):
    q = d.Qpair(nvme0, 8)
    buf = d.Buffer(4096)
    for i in range(4096 // 16):
        buf.set_dsm_range(i, i * io_size, io_size)

    start_time = time.time()
    nvme0n1.dsm(q, buf, 1).waitdone()
    with open("report.csv", "a") as f:
        f.write('%.6f\n' % (time.time() - start_time))

def test_io_qpair_msix_interrupt_mask(nvme0, nvme0n1, buf):
    q = d.Qpair(nvme0, 8)

    q.msix_clear()
    assert not q.msix_isset()
    nvme0n1.read(q, buf, 0, 8)
    time.sleep(1)
    assert q.msix_isset()
    q.waitdone()

    q.msix_clear()
    assert not q.msix_isset()
    nvme0n1.read(q, buf, 0, 8)
    time.sleep(1)
    assert q.msix_isset()
    q.waitdone()

    q.msix_clear()
    q.msix_mask()
    assert not q.msix_isset()
    nvme0n1.read(q, buf, 0, 8)
    assert not q.msix_isset()
    time.sleep(1)
    assert not q.msix_isset()
    q.msix_unmask()
    time.sleep(1)
    assert q.msix_isset()
    q.waitdone()

    q2 = d.Qpair(nvme0, 8)
    q.msix_clear()
    q2.msix_clear()
    assert not q.msix_isset()
    assert not q2.msix_isset()
    nvme0n1.read(q2, buf, 0, 8)
    time.sleep(1)
    assert not q.msix_isset()
    assert q2.msix_isset()
    q2.waitdone()

    q.delete()
    q2.delete()

def subprocess_trim(pciaddr, loops):
    nvme0 = d.Controller(pciaddr)
    nvme0n1 = d.Namespace(nvme0)
    q = d.Qpair(nvme0, 8)
    buf = d.Buffer(4096)
    buf.set_dsm_range(0, 8, 8)

    # send trim commands
    for i in range(loops):
        nvme0n1.dsm(q, buf, 1).waitdone()

def test_trim_time_one_range(nvme0, nvme0n1, lba_count, repeat):
    q = d.Qpair(nvme0, 8)
    buf = d.Buffer(4096)
    if lba_count == 0:
        lba_count = nvme0n1.id_data(7, 0)  # all lba
    buf.set_dsm_range(0, 0, lba_count)

    start_time = time.time()
    nvme0n1.dsm(q, buf, 1).waitdone()
    with open("report.csv", "a") as f:
        f.write('%.6f\n' % (time.time() - start_time))

def test_hello_world(nvme0, nvme0n1: d.Namespace):
    read_buf = d.Buffer(512)
    data_buf = d.Buffer(512)
    data_buf[10:21] = b'hello world'
    qpair = d.Qpair(nvme0, 16)  # create IO SQ/CQ pair, with 16 queue-depth
    assert read_buf[10:21] != b'hello world'

    def write_cb(cdw0, status1):  # command callback function
        nvme0n1.read(qpair, read_buf, 0, 1)
    nvme0n1.write(qpair, data_buf, 0, 1, cb=write_cb)

    qpair.waitdone(2)
    assert read_buf[10:21] == b'hello world'

def test_create_qpairs(nvme0, nvme0n1, buf):
    qpair = d.Qpair(nvme0, 1024)
    nvme0n1.read(qpair, buf, 0)
    qpair.waitdone()
    nvme0n1.read(qpair, buf, 0, 8).waitdone()

    ql = []
    for i in range(15):
        ql.append(d.Qpair(nvme0, 8))
    with pytest.raises(d.QpairCreationError):
        ql.append(d.Qpair(nvme0, 8))

    with pytest.warns(UserWarning, match="ioworker host ERROR -1: "):
        nvme0n1.ioworker(io_size=8, time=1000).start().close()

    qpair.delete()
    nvme0n1.ioworker(io_size=8, time=1).start().close()

    for q in ql:
        q.delete()

def test_different_io_size_and_count(nvme0, nvme0n1,
                                     lba_offset, lba_count, io_count):
    # IO Qpair for IO commands
    io_qpair = d.Qpair(nvme0, 64)

    # allocate all DMA buffers for IO commands
    bufs = []
    for i in range(io_count):
        bufs.append(d.Buffer(lba_count * 512))

    # send and reap all IO command dwords
    for i in range(io_count):
        nvme0n1.read(io_qpair, bufs[i], lba_offset, lba_count)
    io_qpair.waitdone(io_count)

def test_fused_operations(nvme0, nvme0n1):
    # create qpair and buffer for IO commands
    q = d.Qpair(nvme0, 10)
    b = d.Buffer()

    # separate compare and write commands
    nvme0n1.write(q, b, 8).waitdone()
    nvme0n1.compare(q, b, 8).waitdone()

    # implement fused compare and write operations with generic commands
    # Controller.send_cmd() sends admin commands,
    # and Namespace.send_cmd() here sends IO commands.
    nvme0n1.send_cmd(5 | (1 << 8), q, b, 1, 8, 0, 0)
    nvme0n1.send_cmd(1 | (1 << 9), q, b, 1, 8, 0, 0)
    q.waitdone(2)

def test_uct06_configuring_locking_objects_powercycle(nvme0, nvme0n1, subsystem,
                                                      verify, comid,
                                                      new_passwd=b'123456'):
    subsystem.poweroff()
    time.sleep(5)
    subsystem.poweron()
    nvme0.reset()

    qpair = d.Qpair(nvme0, 10)
    buf = d.Buffer(64 * 512)  # data buffer for the 64-LBA read below
    with pytest.warns(UserWarning, match="ERROR status: 02/86"):
        nvme0n1.read(qpair, buf, 0, 64).waitdone()
    qpair.delete()

def test_replay_jedec_client_trace(nvme0, nvme0n1):
    q = d.Qpair(nvme0, 1024)
    buf = d.Buffer(256 * 512, "write", 100, 0xbeef)  # up to 128K
    trim_buf = d.Buffer(4096)
    batch = 0
    counter = 0

    nvme0n1.format(512)

    with zipfile.ZipFile("scripts/stress/MasterTrace_128GB-SSD.zip") as z:
        for s in z.open("Client_128_GB_Master_Trace.txt"):
            l = str(s)[7:-5]
            #logging.info(l)

            if l[0] == 'h':
                # flush
                nvme0n1.flush(q)
                counter += 1
            else:
                op, slba, nlba = l.split()
                slba = int(slba)
                nlba = int(nlba)
                if op == 'e':
                    # write
                    while nlba:
                        n = min(nlba, 256)
                        nvme0n1.write(q, buf, slba, n)
                        counter += 1
                        slba += n
                        nlba -= n
                elif op == 's':
                    # trim
                    trim_buf.set_dsm_range(0, slba, nlba)
                    nvme0n1.dsm(q, trim_buf, 1)
                    counter += 1
                else:
                    logging.info(l)

            # reap in batch for better efficiency
            if counter > 100:
                q.waitdone(counter)
                if batch % 1000 == 0:
                    logging.info("replay batch %d" % (batch // 1000))
                batch += 1
                counter = 0

    q.waitdone(counter)
    q.delete()

def test_io_qpair_msix_interrupt_all(nvme0, nvme0n1, ncqa):
    buf = d.Buffer(4096)
    ql = []
    for i in range(ncqa):
        q = d.Qpair(nvme0, 8)
        ql.append(q)
        logging.info("qpair %d" % q.sqid)

        q.msix_clear()
        assert not q.msix_isset()
        nvme0n1.read(q, buf, 0, 8)
        time.sleep(0.1)
        assert q.msix_isset()
        q.waitdone()

    for q in ql:
        q.delete()

def subprocess_trim(pciaddr, seconds):
    pcie = d.Pcie(pciaddr)
    nvme0 = d.Controller(pcie, True)
    nvme0n1 = d.Namespace(nvme0)
    q = d.Qpair(nvme0, 8)
    buf = d.Buffer(4096)
    buf.set_dsm_range(0, 8, 8)

    # send trim commands
    start = time.time()
    while time.time() - start < seconds:
        nvme0n1.dsm(q, buf, 1).waitdone()

    q.delete()
    nvme0n1.close()
    pcie.close()

def test_hello_world(nvme0, nvme0n1):
    # prepare data buffer and IO queue
    read_buf = d.Buffer(512)
    write_buf = d.Buffer(512)
    write_buf[10:21] = b'hello world'
    qpair = d.Qpair(nvme0, 16)  # create IO SQ/CQ pair, with 16 queue-depth

    # send write and read command
    def write_cb(cdw0, status1):  # command callback function
        nvme0n1.read(qpair, read_buf, 0, 1)
    nvme0n1.write(qpair, write_buf, 0, 1, cb=write_cb)

    # wait commands complete and verify data
    assert read_buf[10:21] != b'hello world'
    qpair.waitdone(2)
    assert read_buf[10:21] == b'hello world'

def test_trim_basic(nvme0: d.Controller, nvme0n1: d.Namespace, verify):
    GB = 1024 * 1024 * 1024
    all_zero_databuf = d.Buffer(512)
    trimbuf = d.Buffer(4096)
    q = d.Qpair(nvme0, 32)

    # DUT info
    logging.info("model number: %s" % nvme0.id_data(63, 24, str))
    logging.info("firmware revision: %s" % nvme0.id_data(71, 64, str))

    # write
    logging.info("write data in 10G ~ 20G")
    io_size = 128 * 1024 // 512
    start_lba = 10 * GB // 512
    lba_count = 10 * GB // 512
    nvme0n1.ioworker(io_size=io_size,
                     lba_align=io_size,
                     lba_random=False,
                     read_percentage=0,
                     lba_start=start_lba,
                     io_count=lba_count // io_size,
                     qdepth=128).start().close()

    # verify data after write, data should be modified
    with pytest.warns(UserWarning, match="ERROR status: 02/85"):
        nvme0n1.compare(q, all_zero_databuf, start_lba).waitdone()

    # get the empty trim time
    trimbuf.set_dsm_range(0, 0, 0)
    trim_cmd = nvme0n1.dsm(q, trimbuf, 1).waitdone()  # first call is longer, due to cache?
    start_time = time.time()
    trim_cmd = nvme0n1.dsm(q, trimbuf, 1).waitdone()
    empty_trim_time = time.time() - start_time

    # the trim time on device-side only
    logging.info("trim the 10G data from LBA 0x%lx" % start_lba)
    trimbuf.set_dsm_range(0, start_lba, lba_count)
    start_time = time.time()
    trim_cmd = nvme0n1.dsm(q, trimbuf, 1).waitdone()
    trim_time = time.time() - start_time - empty_trim_time
    logging.info("trim bandwidth: %0.2fGB/s" % (10 / trim_time))

    # verify after trim
    nvme0n1.compare(q, all_zero_databuf, start_lba).waitdone()

def test_buffer_read_write(nvme0, nvme0n1):
    buf = d.Buffer(512, 'ascii table')
    logging.info("physical address of buffer: 0x%lx" % buf.phys_addr)

    for i in range(512):
        buf[i] = i % 256
    print(buf.dump(128))

    buf = d.Buffer(512, 'random', pvalue=100, ptype=0xbeef)
    print(buf.dump())
    buf = d.Buffer(512, 'random', pvalue=100, ptype=0xbeef)
    print(buf.dump())

    qpair = d.Qpair(nvme0, 10)
    nvme0n1.write(qpair, buf, 0).waitdone()
    nvme0n1.read(qpair, buf, 0).waitdone()
    print(buf.dump())
    qpair.delete()

def test_namespace_multiple(buf):
    # create all controllers and namespaces
    addr_list = ['3d:00.0', ]  # add more DUT BDF here
    pcie_list = [d.Pcie(a) for a in addr_list]

    for p in pcie_list:
        nvmex = d.Controller(p)
        qpair = d.Qpair(nvmex, 8)
        nvmexn1 = d.Namespace(nvmex)

        # check if the write uncorrectable command is supported
        wuecc_support = nvmex.id_data(521, 520) & 0x2
        if wuecc_support != 0:
            nvmexn1.write_uncorrectable(qpair, 0, 8).waitdone()
            with pytest.warns(UserWarning, match="ERROR status: 02/81"):
                nvmexn1.read(qpair, buf, 0, 8).waitdone()

            nvmexn1.write(qpair, buf, 0, 8).waitdone()

            def this_read_cb(dword0, status1):
                assert status1 >> 1 == 0
            nvmexn1.write_uncorrectable(qpair, 0, 8)
            nvmexn1.read(qpair, buf, 0, 8, cb=this_read_cb).waitdone(2)

            def another_read_cb(dword0, status1):
                logging.info("dword0: 0x%08x" % dword0)
                logging.info("phase bit: %d" % (status1 & 1))
                logging.info("dnr: %d" % ((status1 >> 15) & 1))
                logging.info("more: %d" % ((status1 >> 14) & 1))
                logging.info("sct: 0x%x" % ((status1 >> 9) & 0x7))
                logging.info("sc: 0x%x" % ((status1 >> 1) & 0xff))
            with pytest.warns(UserWarning, match="ERROR status: 02/81"):
                nvmexn1.read(qpair, buf, 0, 8, cb=another_read_cb).waitdone()

        qpair.delete()
        nvmexn1.close()
        p.close()

def test_two_namespace_basic(nvme0n1, nvme0, verify, tcp):
    nvme1 = d.Controller(tcp)
    nvme1n1 = d.Namespace(nvme1)
    q1 = d.Qpair(nvme0, 32)
    q2 = d.Qpair(nvme1, 64)
    buf = d.Buffer(512)
    buf1 = d.Buffer(512)
    buf2 = d.Buffer(512)

    nvme0n1.write_zeroes(q1, 11, 1).waitdone()
    nvme0n1.write_zeroes(q1, 22, 1).waitdone()
    nvme1n1.write_zeroes(q2, 11, 1).waitdone()

    logging.info("controller0 namespace size: %d" % nvme0n1.id_data(7, 0))
    logging.info("controller1 namespace size: %d" % nvme1n1.id_data(7, 0))
    assert nvme0n1.id_data(7, 0) != nvme1n1.id_data(7, 0)

    # test nvme0n1
    nvme0n1.read(q1, buf1, 11, 1).waitdone()
    #print(buf1.dump())
    assert buf1[0] == 0
    assert buf1[504] == 0
    nvme0n1.write(q1, buf, 11, 1).waitdone()
    nvme0n1.read(q1, buf1, 11, 1).waitdone()
    #print(buf1.dump())
    assert buf1[0] == 11

    # test nvme1n1
    nvme1n1.read(q2, buf2, 11, 1).waitdone()
    #print(buf2.dump())
    assert buf2[0] == 0
    assert buf2[504] == 0
    nvme1n1.write(q2, buf, 11, 1).waitdone()
    nvme1n1.read(q2, buf2, 11, 1).waitdone()
    #print(buf2.dump())
    assert buf2[0] == 11
    assert buf1[:] != buf2[:]

    # test nvme0n1 again
    nvme0n1.read(q1, buf1, 11, 1).waitdone()
    #print(buf1.dump())
    assert buf1[0] == 11
    nvme0n1.write(q1, buf, 11, 1).waitdone()
    nvme0n1.read(q1, buf1, 11, 1).waitdone()
    #print(buf1.dump())
    assert buf1[0] == 11

    nvme0n1.read(q1, buf1, 22, 1).waitdone()
    #print(buf1.dump())
    assert buf1[0] == 0
    assert buf1[504] == 0
    nvme0n1.write(q1, buf, 22, 1).waitdone()
    nvme0n1.read(q1, buf1, 22, 1).waitdone()
    #print(buf1.dump())
    assert buf1[0] == 22

    nvme0.cmdlog(15)
    nvme1.cmdlog(15)
    q1.cmdlog(15)
    q2.cmdlog(15)

    nvme1n1.close()
    q1.delete()
    q2.delete()

def test_jsonrpc_list_qpairs(pciaddr):
    import json
    import socket

    # create the jsonrpc client
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect('/var/tmp/pynvme.sock')

    def jsonrpc_call(sock, method, params=[]):
        # create and send the command
        req = {}
        req['id'] = 1234567890
        req['jsonrpc'] = '2.0'
        req['method'] = method
        req['params'] = params
        sock.sendall(json.dumps(req).encode('ascii'))

        # receive the result
        resp = json.loads(sock.recv(4096).decode('ascii'))
        assert resp['id'] == 1234567890
        assert resp['jsonrpc'] == '2.0'
        return resp['result']

    result = jsonrpc_call(sock, 'list_all_qpair')
    assert len(result) == 0

    # create controller and admin queue
    pcie = d.Pcie(pciaddr)
    nvme0 = d.Controller(pcie)
    result = jsonrpc_call(sock, 'list_all_qpair')
    assert len(result) == 1
    assert result[0]['qid'] - 1 == 0

    result = jsonrpc_call(sock, 'list_all_qpair')
    assert len(result) == 1
    assert result[0]['qid'] - 1 == 0

    q1 = d.Qpair(nvme0, 8)
    result = jsonrpc_call(sock, 'list_all_qpair')
    assert len(result) == 2
    assert result[0]['qid'] - 1 == 0
    assert result[1]['qid'] - 1 == 1

    q2 = d.Qpair(nvme0, 8)
    result = jsonrpc_call(sock, 'list_all_qpair')
    assert len(result) == 3
    assert result[0]['qid'] - 1 == 0
    assert result[1]['qid'] - 1 == 1
    assert result[2]['qid'] - 1 == 2

    q1.delete()
    result = jsonrpc_call(sock, 'list_all_qpair')
    assert len(result) == 2
    assert result[0]['qid'] - 1 == 0
    assert result[1]['qid'] - 1 == 2

    q2.delete()
    result = jsonrpc_call(sock, 'list_all_qpair')
    assert len(result) == 1
    assert result[0]['qid'] - 1 == 0

    pcie.close()
    result = jsonrpc_call(sock, 'list_all_qpair')
    assert len(result) == 0

# pytest-style fixture: yields an IO qpair sized from CAP.MQES (capped at 1024
# entries) and deletes it on teardown
def qpair(nvme0):
    num_of_entry = (nvme0.cap & 0xffff) + 1
    num_of_entry = min(1024, num_of_entry)
    ret = d.Qpair(nvme0, num_of_entry)
    yield ret
    ret.delete()

def test_reset_within_ioworker(nvme0, repeat):
    region_end = 256 * 1000 * 1000  # 1GB
    qdepth = min(1024, 1 + (nvme0.cap & 0xffff))

    # get the unsafe shutdown count
    def power_cycle_count():
        buf = d.Buffer(4096)
        nvme0.getlogpage(2, buf, 512).waitdone()
        return buf.data(115, 112)

    # run the test one by one
    subsystem = d.Subsystem(nvme0)
    nvme0n1 = d.Namespace(nvme0, 1, region_end)
    orig_unsafe_count = power_cycle_count()
    logging.info("power cycle count: %d" % orig_unsafe_count)

    # 128K random write
    cmdlog_list = [None] * 1000
    with nvme0n1.ioworker(io_size=256,
                          lba_random=True,
                          read_percentage=30,
                          region_end=region_end,
                          time=10,
                          qdepth=qdepth,
                          output_cmdlog_list=cmdlog_list):
        # sudden power loss before the ioworker end
        time.sleep(5)
        nvme0.reset()

    # verify data in cmdlog_list
    time.sleep(5)
    assert True == nvme0n1.verify_enable(True)
    logging.info(cmdlog_list[-10:])
    read_buf = d.Buffer(256 * 512)
    qpair = d.Qpair(nvme0, 10)
    for cmd in cmdlog_list:
        slba = cmd[0]
        nlba = cmd[1]
        op = cmd[2]
        if nlba:
            def read_cb(cdw0, status1):
                nonlocal _slba
                if status1 >> 1:
                    logging.info("slba %d, 0x%x, _slba 0x%x, status 0x%x" % \
                                 (slba, slba, _slba, status1>>1))
            logging.debug("verify slba %d, nlba %d" % (slba, nlba))
            _nlba = nlba // 16
            for i in range(16):
                _slba = slba + i * _nlba
                nvme0n1.read(qpair, read_buf, _slba, _nlba, cb=read_cb).waitdone()

            # re-write to clear CRC mismatch
            nvme0n1.write(qpair, read_buf, slba, nlba, cb=read_cb).waitdone()
    qpair.delete()
    nvme0n1.close()

    # verify unsafe shutdown count
    unsafe_count = power_cycle_count()
    logging.info("power cycle count: %d" % unsafe_count)
    assert unsafe_count == orig_unsafe_count

# pytest-style fixture: yields a 64-entry IO qpair and deletes it on teardown
def qpair(nvme0):
    ret = d.Qpair(nvme0, 64)
    yield ret
    ret.delete()

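# A minimal usage sketch (not from the original scripts, assuming the usual
# 'import nvme as d' and the nvme0n1/qpair fixtures shown above): a test that
# consumes the qpair fixture only issues IO; the fixture's teardown deletes the
# qpair, so the test body needs no explicit qpair.delete().
def test_read_with_qpair_fixture(nvme0n1, qpair):
    buf = d.Buffer(512)                        # one-sector data buffer
    nvme0n1.read(qpair, buf, 0, 1).waitdone()  # read LBA 0 and reap its completion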