Example 1
def test_uct08_erasing_range(nvme0,
                             nvme0n1,
                             qpair,
                             verify,
                             comid,
                             new_passwd=b'123456'):
    # TODO: skip on pyrite

    # verify data before erasing
    buf = d.Buffer(64 * 512)
    nvme0n1.read(qpair, buf, 0, 64).waitdone()
    assert buf.data(11, 8) == 0x5aa55aa5

    # erasing
    tcg.Command(nvme0, comid).start_auth_session(0x69, 0, new_passwd).send()
    hsn, tsn = tcg.Response(nvme0, comid).receive().start_session()
    tcg.Command(nvme0, comid).get_active_key(hsn, tsn, 1).send()
    prev_data = tcg.Response(nvme0, comid).receive().get_active_key()
    tcg.Command(nvme0, comid).gen_new_key(hsn, tsn, 1, prev_data).send()
    tcg.Response(nvme0, comid).receive()
    tcg.Command(nvme0, comid).end_session(hsn, tsn).send(False)
    tcg.Response(nvme0, comid).receive()

    # verify
    buf = d.Buffer(64 * 512)
    nvme0n1.read(qpair, buf, 0, 64).waitdone()
    assert buf.data(11, 8) == 0
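
Note: the erase here is cryptographic. GenKey regenerates the media encryption key of locking range 1, so the 0x5aa55aa5 pattern written earlier can no longer be decrypted and the LBAs read back as zero.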
Example 2
def test_hello_world(nvme0, nvme0n1: d.Namespace):
    read_buf = d.Buffer(512)
    data_buf = d.Buffer(512)
    data_buf[10:21] = b'hello world'
    qpair = d.Qpair(nvme0, 16)  # create IO SQ/CQ pair, with 16 queue-depth
    assert read_buf[10:21] != b'hello world'

    def write_cb(cdw0, status1):  # command callback function
        nvme0n1.read(qpair, read_buf, 0, 1)
    nvme0n1.write(qpair, data_buf, 0, 1, cb=write_cb)
    qpair.waitdone(2)
    assert read_buf[10:21] == b'hello world'
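
The nvme0, nvme0n1 and qpair arguments used throughout these examples are pytest fixtures provided by pynvme's conftest.py. A minimal sketch of the equivalent manual setup, with a hypothetical PCI address:

    nvme0 = d.Controller(b'01:00.0')  # hypothetical BDF address
    nvme0n1 = d.Namespace(nvme0)
    qpair = d.Qpair(nvme0, 16)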
Example 3
def test_hello_world(nvme0, nvme0n1, qpair):
    # prepare data buffer and IO queue
    read_buf = d.Buffer(512)
    write_buf = d.Buffer(512)
    write_buf[10:21] = b'hello world'

    # send write and read command
    def write_cb(cdw0, status1):  # command callback function
        nvme0n1.read(qpair, read_buf, 0, 1)
    nvme0n1.write(qpair, write_buf, 0, 1, cb=write_cb)

    # wait commands complete and verify data
    assert read_buf[10:21] != b'hello world'
    qpair.waitdone(2)
    assert read_buf[10:21] == b'hello world'
Example 4
def test_trim_basic(nvme0: d.Controller, nvme0n1, verify, qpair):
    GB = 1024 * 1024 * 1024
    all_zero_databuf = d.Buffer(512)
    orig_databuf = d.Buffer(512)
    trimbuf = d.Buffer(4096)

    # DUT info
    logging.info("model number: %s" % nvme0.id_data(63, 24, str))
    logging.info("firmware revision: %s" % nvme0.id_data(71, 64, str))

    # write
    logging.info("write data in 10G ~ 20G")
    io_size = 128 * 1024 // 512
    start_lba = 10 * GB // 512
    lba_count = 10 * GB // 512
    nvme0n1.ioworker(io_size=io_size,
                     lba_align=io_size,
                     lba_random=False,
                     read_percentage=0,
                     lba_start=start_lba,
                     io_count=lba_count // io_size,
                     qdepth=128).start().close()

    nvme0n1.read(qpair, orig_databuf, start_lba).waitdone()

    # verify data after write, data should be modified
    with pytest.warns(UserWarning, match="ERROR status: 02/85"):
        nvme0n1.compare(qpair, all_zero_databuf, start_lba).waitdone()

    # get the empty trim time
    trimbuf.set_dsm_range(0, 0, 0)
    nvme0n1.dsm(qpair, trimbuf, 1).waitdone()  # the first call is longer, possibly due to caching
    start_time = time.time()
    nvme0n1.dsm(qpair, trimbuf, 1).waitdone()
    empty_trim_time = time.time() - start_time

    # the trim time on device-side only
    logging.info("trim the 10G data from LBA 0x%lx" % start_lba)
    trimbuf.set_dsm_range(0, start_lba, lba_count)
    start_time = time.time()
    nvme0n1.dsm(qpair, trimbuf, 1).waitdone()
    trim_time = time.time() - start_time - empty_trim_time
    logging.info("trim bandwidth: %0.2fGB/s" % (10 / trim_time))

    # verify after trim
    nvme0n1.compare(qpair, all_zero_databuf, start_lba).waitdone()
    nvme0n1.compare(qpair, orig_databuf, start_lba).waitdone()
Example 5
def test_replay_jedec_client_trace(nvme0, nvme0n1):
    q = d.Qpair(nvme0, 1024)
    buf = d.Buffer(256 * 512, "write", 100, 0xbeef)  # upto 128K
    trim_buf = d.Buffer(4096)
    batch = 0
    counter = 0

    nvme0n1.format(512)

    with zipfile.ZipFile("scripts/stress/MasterTrace_128GB-SSD.zip") as z:
        for s in z.open("Client_128_GB_Master_Trace.txt"):
            l = str(s)[7:-5]  # strip the bytes-repr wrapper and the line prefix
            #logging.info(l)

            if l[0] == 'h':
                # flush
                nvme0n1.flush(q)
                counter += 1
            else:
                op, slba, nlba = l.split()
                slba = int(slba)
                nlba = int(nlba)
                if op == 'e':
                    # write
                    while nlba:
                        n = min(nlba, 256)
                        nvme0n1.write(q, buf, slba, n)
                        counter += 1
                        slba += n
                        nlba -= n
                elif op == 's':
                    # trims
                    trim_buf.set_dsm_range(0, slba, nlba)
                    nvme0n1.dsm(q, trim_buf, 1)
                    counter += 1
                else:
                    logging.info(l)

            # reap in batch for better efficiency
            if counter > 100:
                q.waitdone(counter)
                if batch % 1000 == 0:
                    logging.info("replay batch %d" % (batch // 1000))
                batch += 1
                counter = 0

    q.waitdone(counter)
    q.delete()
Example 6
def test_ioworker_with_temperature_and_trim(nvme0, nvme0n1):
    # start trim process
    import multiprocessing
    mp = multiprocessing.get_context("spawn")
    p = mp.Process(target=subprocess_trim,
                   args=(nvme0.addr.encode('utf-8'), 300000))
    p.start()

    # start read/write ioworker and admin commands
    smart_log = d.Buffer(512, "smart log")
    with nvme0n1.ioworker(io_size=8,
                          lba_align=16,
                          lba_random=True,
                          qdepth=16,
                          read_percentage=67,
                          iops=10000,
                          time=10):
        for i in range(15):
            nvme0.getlogpage(0x02, smart_log, 512).waitdone()
            ktemp = smart_log.data(2, 1)

            from pytemperature import k2c
            logging.info("temperature: %0.2f degreeC" % k2c(ktemp))
            time.sleep(1)

    # wait trim process complete
    p.join()
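
The subprocess_trim target used here is defined in Example 27 below; it opens its own Controller from the PCI address, since each process needs its own pynvme objects (hence the spawn context and the encoded address argument).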
Example 7
def test_ioworker_with_temperature_and_trim(nvme0, nvme0n1):
    test_seconds = 10

    # start trim process
    import multiprocessing
    mp = multiprocessing.get_context("spawn")
    p = mp.Process(target=subprocess_trim, args=(nvme0.addr, test_seconds))
    p.start()

    # start read/write ioworker and admin commands
    smart_log = d.Buffer(512, "smart log")
    with nvme0n1.ioworker(io_size=256,
                          lba_random=False,
                          read_percentage=0,
                          time=test_seconds):
        for i in range(15):
            time.sleep(1)
            nvme0.getlogpage(0x02, smart_log, 512).waitdone()
            ktemp = smart_log.data(2, 1)

            from pytemperature import k2c
            logging.info("temperature: %0.2f degreeC" % k2c(ktemp))

    # wait trim process complete
    p.join()
Example 8
def test_get_dell_smart_attributes(nvme0):
    import PySimpleGUI as sg

    smart = d.Buffer()
    nvme0.getlogpage(0xCA, smart, 512).waitdone()

    l = []
    l.append('Byte |  Value  | Attribute')
    l.append('   0 |  %5d  | Re-Assigned Sector Count' % smart.data(0))
    l.append('   1 |  %5d  | Program Fail Count (Worst Case Component)' %
             smart.data(1))
    l.append('   2 |  %5d  | Program Fail Count (SSD Total)' % smart.data(2))
    l.append('   3 |  %5d  | Erase Fail Count (Worst Case Component)' %
             smart.data(3))
    l.append('   4 |  %5d  | Erase Fail Count (SSD Total)' % smart.data(4))
    l.append('   5 |  %5d  | Wear Leveling Count' % smart.data(5))
    l.append(
        '   6 |  %5d  | Used Reserved Block Count (Worst Case Component)' %
        smart.data(6))
    l.append('   7 |  %5d  | Used Reserved Block Count (SSD Total)' %
             smart.data(7))
    l.append('11:8 |  %5d  | Reserved Block Count (SSD Total)' %
             smart.data(11, 8))

    layout = [[sg.Listbox(l, size=(70, 10))]]
    sg.Window("Dell SMART Attributes",
              layout + [[sg.OK()]],
              font=('monospace', 16)).Read()
Example 9
    def nvme_init(nvme0):
        logging.info("user defined nvme init")
        
        # 1. disable cc.en
        nvme0[0x14] = 0

        # 2. wait csts.rdy to 0
        while not (nvme0[0x1c] & 0x1) == 0:
            pass

        # 3. set admin queue registers
        nvme0.init_adminq()

        # 4. set register cc
        nvme0[0x14] = 0x00460000

        # 5. enable cc.en
        nvme0[0x14] = 0x00460001

        # 6. wait csts.rdy to 1
        while not (nvme0[0x1c] & 0x1) == 1:
            pass

        # 7. identify controller
        nvme0.identify(d.Buffer(4096)).waitdone()

        # 8. create and identify all namespace
        nvme0.init_ns()

        # 9. set/get num of queues, 2 IO queues
        nvme0.setfeatures(0x7, cdw11=0x00010001).waitdone()
        nvme0.init_queues(nvme0.getfeatures(0x7).waitdone())

        # 10. send out all aer
        aerl = nvme0.id_data(259)+1
        for i in range(aerl):
            nvme0.aer()
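
Examples 9 and 15 define nvme_init as a callback rather than a test. A sketch of how such a user-defined init function could be hooked up, assuming pynvme's nvme_init_func keyword of d.Controller:

    nvme0 = d.Controller(b'01:00.0', nvme_init_func=nvme_init)  # hypothetical address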
Example 10
def test_read_lba_data(nvme0):
    import PySimpleGUI as sg

    lba = int(sg.PopupGetText("Which LBA to read?", "pynvme"))
    q = d.Qpair(nvme0, 10)
    b = d.Buffer(512, "LBA 0x%08x" % lba)
    nvme0n1 = d.Namespace(nvme0)
    nvme0n1.read(q, b, lba).waitdone()
    sg_show_hex_buffer(b)
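
sg_show_hex_buffer is not defined in these snippets (Examples 12 and 13 call it too). A minimal sketch with PySimpleGUI, assuming the helper only renders the buffer's hex dump in a window:

import PySimpleGUI as sg

def sg_show_hex_buffer(buf):
    # display the buffer's hex dump in a window
    layout = [[sg.Multiline(buf.dump(), size=(80, 30), font=('monospace', 12))],
              [sg.OK()]]
    sg.Window("pynvme buffer", layout).Read()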
Example 11
def do_power_cycle(dirty, subsystem, nvme0n1, nvme0):
    if not dirty:
        # notify drive for a clean shutdown
        start_time = time.time()
        subsystem.shutdown_notify()
        logging.info("notify time %.6f sec" % (time.time()-start_time))

    # boot again
    csv_start = time.time()
    start_time = time.time()
    subsystem.power_cycle(10)
    nvme0.reset()
    logging.info("init time %.6f sec" % (time.time()-start_time-10))

    # first read time
    start_time = time.time()
    q = d.Qpair(nvme0, 16)
    b = d.Buffer(512)
    lba = nvme0n1.id_data(7, 0) - 1  # last LBA: NSZE - 1
    nvme0n1.read(q, b, lba).waitdone()
    logging.info("media ready time %.6f sec" % (time.time()-start_time))
    q.delete()
    
    # report to csv
    ready_time = time.time()-csv_start-10
    with open("report.csv", "a") as f:
        f.write('%.6f\n' % ready_time)
Example 12
def test_read_lba_data(nvme0, nvme0n1):
    import PySimpleGUI as sg

    lba = sg.PopupGetText("Which LBA to read?", "pynvme")
    lba = int(lba, 0)  # convert to number
    q = d.Qpair(nvme0, 10)
    b = d.Buffer(512, "LBA 0x%08x" % lba)
    nvme0n1.read(q, b, lba).waitdone()
    sg_show_hex_buffer(b)
Example 13
def test_read_lba_data(nvme0, nvme0n1, qpair):
    import PySimpleGUI as sg

    lba = int(sg.PopupGetText("Which LBA to read?", "pynvme"))
    b = d.Buffer(512, "LBA 0x%08x" % lba)
    nvme0n1.read(qpair, b, lba).waitdone()
    sg_show_hex_buffer(b)
Example 14
def test_get_current_temperature(nvme0):
    from pytemperature import k2c
    smart_log = d.Buffer()
    nvme0.getlogpage(0x02, smart_log, 512).waitdone()
    ktemp = smart_log.data(2, 1)
    logging.info("current temperature in SMART data: %0.2f degreeC" %
                 k2c(ktemp))
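
pytemperature's k2c is a plain Kelvin-to-Celsius conversion; the composite temperature at SMART bytes 2:1 is reported in Kelvin per the NVMe specification. To drop the dependency, an equivalent helper is:

def k2c(ktemp):
    # NVMe SMART reports composite temperature in Kelvin
    return ktemp - 273.15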
Example 15
    def nvme_init(nvme0):
        logging.info("user defined nvme init")

        # 1. disable cc.en
        nvme0[0x14] = 0

        # 2. wait csts.rdy to 0
        while not (nvme0[0x1c] & 0x1) == 0:
            pass
        logging.info(time.time())

        # 3. set admin queue registers
        nvme0.init_adminq()
        logging.info(time.time())

        # 5. enable cc.en
        nvme0[0x14] = 0x00460001

        # 6. wait csts.rdy to 1
        while not (nvme0[0x1c] & 0x1) == 1:
            pass
        logging.info(time.time())

        # 7. identify controller
        nvme0.identify(d.Buffer(4096)).waitdone()
        logging.info(time.time())

        nvme0.setfeatures(0x7, cdw11=0x00ff00ff).waitdone()  # request 256 IO SQ/CQ (0-based values)
        nvme0.init_queues(nvme0.getfeatures(0x7).waitdone())
Example 16
def test_admin_page_offset(nvme0, offset):
    buf = d.Buffer(4096 * 2, 'controller identify data')
    buf.offset = offset
    assert buf[offset] == 0
    nvme0.identify(buf).waitdone()
    assert buf[0] == 0
    assert buf[offset] != 0
Example 17
def test_replay_jedec_client_trace(nvme0, nvme0n1, qpair):
    mdts = min(nvme0.mdts, 64 * 1024)  # upto 64K IO
    buf = d.Buffer(mdts, "write", 100, 0xbeef)
    trim_buf_list = [d.Buffer() for i in range(1024)]
    batch = 0
    counter = 0

    nvme0n1.format(512)

    with zipfile.ZipFile("scripts/stress/MasterTrace_128GB-SSD.zip") as z:
        for s in z.open("Client_128_GB_Master_Trace.txt"):
            l = str(s)[7:-5]  # strip the bytes-repr wrapper and the line prefix

            if l[0] == 'h':
                # flush
                nvme0n1.flush(qpair)
                counter += 1
            else:
                op, slba, nlba = l.split()
                slba = int(slba)
                nlba = int(nlba)
                if op == 'e':
                    # write
                    while nlba:
                        n = min(nlba, mdts // 512)
                        nvme0n1.write(qpair, buf, slba, n)
                        counter += 1
                        slba += n
                        nlba -= n
                elif op == 's':
                    # trims
                    trim_buf = trim_buf_list[counter]
                    trim_buf.set_dsm_range(0, slba, nlba)
                    nvme0n1.dsm(qpair, trim_buf, 1)
                    counter += 1
                else:
                    logging.error(l)

            # reap in batch for better efficiency
            if counter >= 64:
                qpair.waitdone(counter)
                if batch % 1000 == 0:
                    logging.info("replay progress: %d" % (batch // 1000))
                batch += 1
                counter = 0

    qpair.waitdone(counter)
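
Compared with Example 5, this replay allocates a separate DSM buffer per outstanding trim (trim_buf_list), so a range buffer is never rewritten while its command is still in flight; it also caps write size at the controller's MDTS and reaps every 64 commands.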
Example 18
def test_quarch_dirty_power_cycle_single(nvme0, poweron=None, poweroff=None):
    region_end = 256*1000*1000  # region size in LBA units
    qdepth = min(1024, 1+(nvme0.cap&0xffff))
    
    # get the unsafe shutdown count
    def power_cycle_count():
        buf = d.Buffer(4096)
        nvme0.getlogpage(2, buf, 512).waitdone()
        return buf.data(115, 112)
    
    # run the test one by one
    subsystem = d.Subsystem(nvme0, poweron, poweroff)
    nvme0n1 = d.Namespace(nvme0, 1, region_end)
    assert True == nvme0n1.verify_enable(True)
    orig_unsafe_count = power_cycle_count()
    logging.info("power cycle count: %d" % orig_unsafe_count)

    # 128K random write
    cmdlog_list = [None]*1000
    with nvme0n1.ioworker(io_size=256,
                          lba_random=True,
                          read_percentage=0,
                          region_end=region_end,
                          time=30,
                          qdepth=qdepth, 
                          output_cmdlog_list=cmdlog_list):
        # sudden power loss before the ioworker end
        time.sleep(10)
        subsystem.poweroff()

    # power on and reset controller
    time.sleep(5)
    subsystem.poweron()
    nvme0.reset()

    # verify data in cmdlog_list
    logging.info(cmdlog_list[-10:])
    read_buf = d.Buffer(256*512)
    qpair = d.Qpair(nvme0, 10)
    for cmd in cmdlog_list:
        slba = cmd[0]
        nlba = cmd[1]
        op = cmd[2]
        if nlba and op == 1:  # verify only write commands
            def read_cb(cdw0, status1):
                nonlocal slba
                if status1>>1:
                    logging.info("slba 0x%x, status 0x%x" % (slba, status1>>1))
            #logging.info("verify slba 0x%x, nlba %d" % (slba, nlba))
            nvme0n1.read(qpair, read_buf, slba, nlba, cb=read_cb).waitdone()
            # re-write to clear CRC mismatch
            nvme0n1.write(qpair, read_buf, slba, nlba, cb=read_cb).waitdone()
    qpair.delete()
    nvme0n1.close()

    # verify unsafe shutdown count
    unsafe_count = power_cycle_count()
    logging.info("power cycle count: %d" % unsafe_count)
    assert unsafe_count == orig_unsafe_count+1
Example 19
def test_vpd_write_and_read(nvme0):
    if not nvme0.supports(0x1d) or not nvme0.supports(0x1e):
        pytest.skip("mi commands are not supported")

    write_buf = d.Buffer(256, pvalue=100, ptype=0xbeef)
    read_buf = d.Buffer(256)

    status, response = mi_vpd_write(nvme0, write_buf)
    assert status == 0
    assert response == 0

    status, response = mi_vpd_read(nvme0, read_buf)
    assert status == 0
    assert response == 0

    assert write_buf != read_buf        # different buffer objects
    assert write_buf[:] == read_buf[:]  # but identical content
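
mi_vpd_write and mi_vpd_read are not defined in these snippets. A minimal sketch built on the mi_send/mi_receive calls seen in Examples 20 and 22, assuming the NVMe-MI VPD Read/Write opcodes (5 and 6), an mi_send symmetric to mi_receive, and that the completion dword packs the MI status in its low byte with the management response above it:

def mi_vpd_read(nvme0, buf, offset=0, dlen=256):
    # VPD Read is NVMe-MI opcode 5; dword0 = data offset, dword1 = length
    cdw0 = nvme0.mi_receive(5, offset, dlen, buf).waitdone()
    return cdw0 & 0xff, cdw0 >> 8

def mi_vpd_write(nvme0, buf, offset=0, dlen=256):
    # VPD Write is NVMe-MI opcode 6 (assumed mi_send signature)
    cdw0 = nvme0.mi_send(6, offset, dlen, buf).waitdone()
    return cdw0 & 0xff, cdw0 >> 8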
Example 20
def test_read_nvme_mi_data_structure_nvm_subsystem_information(nvme0):
    if not nvme0.supports(0x1d) or not nvme0.supports(0x1e):
        pytest.skip("mi commands are not supported")

    buf = d.Buffer(0x2000)
    dword0 = nvme0.mi_receive(0, 0, 0, buf).waitdone()  # opcode 0: read NVMe-MI data structure
    logging.info(hex(dword0))
    logging.info(buf.dump(3))
Example 21
def test_get_log_page(nvme0, lid=None):
    import PySimpleGUI as sg

    if lid is None:
        lid = int(sg.PopupGetText("Which Log ID to read?", "pynvme"))
    lbuf = d.Buffer(512, "%s, Log ID: %d" % (nvme0.id_data(63, 24, str), lid))
    nvme0.getlogpage(lid, lbuf).waitdone()
    sg_show_hex_buffer(lbuf)
Example 22
def test_read_nvme_mi_data_structure_port_information_wrong_port(nvme0):
    if not nvme0.supports(0x1d) or not nvme0.supports(0x1e):
        pytest.skip("mi commands are not supported")

    buf = d.Buffer(0x2000)
    dword0 = nvme0.mi_receive(0, (1 << 24) + (1 << 16), 0, buf).waitdone()  # DTYP 1 (port info), port id 1
    logging.info(hex(dword0))
    logging.info(buf.dump(32))
Example 23
def test_trim_time_all_range_buffer(nvme0, nvme0n1, repeat, io_size, qpair):
    buf = d.Buffer(4096)
    for i in range(4096 // 16):  # each DSM range descriptor takes 16 bytes
        buf.set_dsm_range(i, i * io_size, io_size)

    start_time = time.time()
    nvme0n1.dsm(qpair, buf, 1).waitdone()
    with open("report.csv", "a") as f:
        f.write('%.6f\n' % (time.time() - start_time))
Example 24
def test_pcie_aspm_L1(pcie, nvme0):
    # enable ASPM L1
    pcie.aspm = 2
    buf = d.Buffer(4096, 'controller identify data')
    nvme0.identify(buf, 0, 1).waitdone()
    time.sleep(1)

    # disable ASPM (back to L0)
    pcie.aspm = 0
    logging.info("model number: %s" % nvme0.id_data(63, 24, str))
Example 25
def test_buffer_read_write(nvme0, nvme0n1):
    buf = d.Buffer(512, 'ascii table')
    logging.info("physical address of buffer: 0x%lx" % buf.phys_addr)

    for i in range(512):
        buf[i] = i % 256
    print(buf.dump(128))

    # ptype 0xbeef: random data pattern
    buf = d.Buffer(512, 'random', pvalue=100, ptype=0xbeef)
    print(buf.dump())
    buf = d.Buffer(512, 'random', pvalue=100, ptype=0xbeef)
    print(buf.dump())

    qpair = d.Qpair(nvme0, 10)
    nvme0n1.write(qpair, buf, 0).waitdone()
    nvme0n1.read(qpair, buf, 0).waitdone()
    print(buf.dump())
    qpair.delete()
Example 26
def test_ioworker_with_temperature(nvme0, nvme0n1):
    from pytemperature import k2c
    smart_log = d.Buffer(512, "smart log")
    with nvme0n1.ioworker(io_size=8, lba_align=16,
                          lba_random=True, qdepth=16,
                          read_percentage=0, time=30):
        for i in range(40):
            nvme0.getlogpage(0x02, smart_log, 512).waitdone()
            ktemp = smart_log.data(2, 1)
            logging.info("temperature: %0.2f degreeC" % k2c(ktemp))
            time.sleep(1)
Example 27
def subprocess_trim(pciaddr, loops):
    nvme0 = d.Controller(pciaddr)
    nvme0n1 = d.Namespace(nvme0)
    q = d.Qpair(nvme0, 8)
    buf = d.Buffer(4096)
    buf.set_dsm_range(0, 8, 8)

    # send trim commands
    for i in range(loops):
        nvme0n1.dsm(q, buf, 1).waitdone()
Example 28
def test_trim_time_one_range(nvme0, nvme0n1, lba_count, repeat, qpair):
    buf = d.Buffer(4096)
    if lba_count == 0:
        lba_count = nvme0n1.id_data(7, 0)  # all lba
    buf.set_dsm_range(0, 0, lba_count)

    start_time = time.time()
    nvme0n1.dsm(qpair, buf, 1).waitdone()
    with open("report.csv", "a") as f:
        f.write('%.6f\n' % (time.time() - start_time))
Example 29
def test_admin_page_offset_invalid(nvme0, nvme0n1, qpair, offset):
    buf = d.Buffer(4096*2, 'controller identify data')
    buf.offset = offset
    buf.size = 32  # PRP1 only
    assert buf[0] == 0
    assert buf[offset] == 0
    # pytest warning may not appear here
    nvme0.identify(buf).waitdone()
    logging.info(buf.dump(16))
    assert buf[0] != 0
Example 30
def test_different_io_size_and_count(nvme0, nvme0n1, qpair, lba_offset,
                                     lba_count, io_count):
    # allocate all DMA buffers for IO commands
    bufs = []
    for i in range(io_count):
        bufs.append(d.Buffer(lba_count * 512))

    # send all IO commands, then reap them together
    for i in range(io_count):
        nvme0n1.read(qpair, bufs[i], lba_offset, lba_count)
    qpair.waitdone(io_count)