def fstest(fs, filesize=16 * MEG, depth=1, direct=False, sync=False, crtdlt=False, bsizes=(4096, 128 * 1024, 4096 * 1024)):
    """
    Compute and display standard fio-style results for a file system on a disk.

    fs -- file system to be tested
    filesize -- size of the file in which I/O is being done
    depth -- number of concurrent requests
    direct -- I/O is direct (not buffered)
    sync -- updates are immediately flushed
    crtdlt -- also measure create/delete operations
    bsizes -- block sizes to exercise
    """
    if crtdlt:
        # optional create/delete benchmark, reported separately
        (t_create, bw_create, _) = fs.create(sync=sync)
        (t_delete, bw_delete, _) = fs.delete(sync=sync)
        rpt = Report(("create", "delete"))
        rpt.printHeading()
        rpt.printIOPS(1, (bw_create, bw_delete))
        rpt.printLatency(1, (t_create, t_delete))

    # sequential/random read/write throughput for each block size
    rpt = Report(("seq read", "seq write", "rnd read", "rnd write"))
    rpt.printHeading()
    for bs in bsizes:
        (t_sr, bw_sr, _) = fs.read(bs, filesize, seq=True, depth=depth, direct=direct)
        (t_sw, bw_sw, _) = fs.write(bs, filesize, seq=True, depth=depth, direct=direct, sync=sync)
        (t_rr, bw_rr, _) = fs.read(bs, filesize, seq=False, depth=depth, direct=direct)
        (t_rw, bw_rw, _) = fs.write(bs, filesize, seq=False, depth=depth, direct=direct, sync=sync)
        rpt.printBW(bs, (bw_sr, bw_sw, bw_rr, bw_rw))
        rpt.printIOPS(bs, (bw_sr, bw_sw, bw_rr, bw_rw))
        rpt.printLatency(bs, (t_sr, t_sw, t_rr, t_rw))
def gatewaytest(gw, dict, descr=""):
    """
    Exercise a gateway with throughput tests described in a dict.

    gw -- gateway to be tested
    dict -- test parameters (each falls back to a default if absent):
        SioCdepth ... list of request depths
        SioCbs ... list of block sizes
        SioCmisc ... do create/delete ops too?
    descr -- description string included in each report heading

    NOTE(review): the parameter name `dict` shadows the builtin; kept
    because renaming would break any caller passing it by keyword.
    """
    dflt = {        # default throughput test parameters
        'SioCdepth': [1, 32],
        'SioCbs': [4096, 128 * 1024, 4096 * 1024],
        'SioCmisc': False,
    }
    # dict.get replaces the previous `k in d ... else dflt[k]` conditionals
    depths = dict.get('SioCdepth', dflt['SioCdepth'])
    bsizes = dict.get('SioCbs', dflt['SioCbs'])
    misc = dict.get('SioCmisc', dflt['SioCmisc'])

    for d in depths:
        if misc:
            # create/delete latency and IOPS, reported separately
            tc = gw.create(depth=d)
            td = gw.delete(depth=d)
            r = Report(("create", "delete"))
            r.printHeading()
            r.printIOPS(1, (SECOND / tc, SECOND / td))
            r.printLatency(1, (tc, td))
            print("")

        print("Gateway throughput: %s, depth=%d" % (descr, d))
        r = Report(("seq read", "seq write", "rnd read", "rnd write"))
        r.printHeading()
        for bs in bsizes:
            (tsr, bsr, _) = gw.read(bs, depth=d, seq=True)
            (tsw, bsw, _) = gw.write(bs, depth=d, seq=True)
            (trr, brr, _) = gw.read(bs, depth=d, seq=False)
            (trw, brw, _) = gw.write(bs, depth=d, seq=False)
            r.printBW(bs, (bsr, bsw, brr, brw))

            # compute the corresponding IOPS
            isr = bsr / bs
            isw = bsw / bs
            irr = brr / bs
            irw = brw / bs
            r.printIOPS(0, (isr, isw, irr, irw))
            r.printLatency(0, (tsr, tsw, trr, trw))
        print("")
def tptest(disk, dict, descr="Estimated Throughput"):
    """
    Run a standard set of throughput simulations against a specified device.

    disk -- device to be tested
    dict -- test parameters (each falls back to a default if absent):
        FioRsize ... size of test file
        FioRdepth ... list of request depths
        FioRbs ... list of block sizes
    descr -- heading printed above each per-depth report
    """
    dflt = {        # default throughput test parameters
        'FioRsize': 16 * GIG,
        'FioRdepth': [1, 32],
        'FioRbs': [4096, 128 * 1024, 4096 * 1024],
    }
    sz = dict.get('FioRsize', dflt['FioRsize'])
    depths = dict.get('FioRdepth', dflt['FioRdepth'])
    bsizes = dict.get('FioRbs', dflt['FioRbs'])

    report = Report(("seq read", "seq write", "rnd read", "rnd write"))
    for depth in depths:
        print("%s (%s), depth=%d" % (descr, disk.desc, depth))
        report.printHeading()
        for bs in bsizes:
            # simulate each of the four access patterns
            lat_sr = disk.avgTime(bs, sz, read=True, seq=True, depth=depth)
            lat_sw = disk.avgTime(bs, sz, read=False, seq=True, depth=depth)
            lat_rr = disk.avgTime(bs, sz, read=True, seq=False, depth=depth)
            lat_rw = disk.avgTime(bs, sz, read=False, seq=False, depth=depth)
            lats = (lat_sr, lat_sw, lat_rr, lat_rw)

            # corresponding bandwidth (bytes/sec) and IOPS for each pattern
            report.printBW(bs, tuple(bs * SECOND / t for t in lats))
            report.printIOPS(0, tuple(SECOND / t for t in lats))

            # print out the latencies
            report.printLatency(0, lats)
        print("")
def testNIC(nic, dict, descr):
    """
    Report per-transfer latency and CPU cost for a NIC across block sizes.

    nic -- NIC model to be tested
    dict -- test parameters; 'bsizes' overrides the default block-size list
    descr -- description line printed above the report
    """
    defaults = {
        'bsizes': [64, 128, 256, 512, 1024, 2048, 4096, 8192, 16 * 1024]
    }
    print(descr)
    rpt = Report(("cpu read", "tot read", "cpu write", "tot write"))
    rpt.printHeading()
    for bs in dict.get('bsizes', defaults['bsizes']):
        # total elapsed time vs CPU time, in each direction
        t_read = nic.read_time(bs)
        c_read = nic.read_cpu(bs)
        t_write = nic.write_time(bs)
        c_write = nic.write_cpu(bs)
        rpt.printLatency(bs, (c_read, t_read, c_write, t_write))
    print("")
def testHBA(hba, dict, descr):
    """
    Report per-transfer latency and CPU cost for an HBA across block sizes.

    hba -- HBA model to be tested
    dict -- test parameters; 'bsizes' overrides the default block-size list
    descr -- description line printed above the report
    """
    defaults = {
        'bsizes': [512, 4096, 128 * 1024, 4096 * 1024]
    }
    print(descr)
    rpt = Report(("cpu read", "tot read", "cpu write", "tot write"))
    rpt.printHeading()
    for bs in dict.get('bsizes', defaults['bsizes']):
        # total elapsed time vs CPU time, in each direction
        t_read = hba.read_time(bs)
        c_read = hba.read_cpu(bs)
        t_write = hba.write_time(bs)
        c_write = hba.write_cpu(bs)
        rpt.printLatency(bs, (c_read, t_read, c_write, t_write))
    print("")
def servertest(fs, depth=1, crtdlt=False, bsizes=(4096, 128 * 1024, 4096 * 1024)):
    """
    Compute and display standard server test results.

    fs -- server to be tested
    depth -- number of concurrent requests
    crtdlt -- also measure create/delete operations
    bsizes -- block sizes to exercise
    """
    if crtdlt:
        # create/delete latencies, converted to ops/sec for the IOPS row
        t_create = fs.create()
        t_delete = fs.delete()
        rpt = Report(("create", "delete"))
        rpt.printHeading()
        rpt.printIOPS(1, (SECOND / t_create, SECOND / t_delete))
        rpt.printLatency(1, (t_create, t_delete))

    rpt = Report(("seq read", "seq write", "rnd read", "rnd write"))
    rpt.printHeading()
    for bs in bsizes:
        (t_sr, bw_sr, _) = fs.read(bs, depth=depth, seq=True)
        (t_sw, bw_sw, _) = fs.write(bs, depth=depth, seq=True)
        (t_rr, bw_rr, _) = fs.read(bs, depth=depth, seq=False)
        (t_rw, bw_rw, _) = fs.write(bs, depth=depth, seq=False)
        rpt.printBW(bs, (bw_sr, bw_sw, bw_rr, bw_rw))
        rpt.printIOPS(bs, (bw_sr, bw_sw, bw_rr, bw_rw))
        rpt.printLatency(bs, (t_sr, t_sw, t_rr, t_rw))
def tptest(disk, filesize, depth=1, bsizes=(4096, 128 * 1024, 4096 * 1024)):
    """
    run a standard set of throughputs against a specified device

    disk -- device to be tested
    filesize -- size of the file used for the test
    depth -- number of queued parallel operations
    bsizes -- block sizes to exercise
    """
    # NOTE(review): another tptest(disk, dict, descr) with a different
    # signature appears earlier in this file; whichever definition executes
    # last shadows the other at import time -- confirm which one is intended.
    r = Report(("seq read", "seq write", "rnd read", "rnd write"))
    r.printHeading()
    for bs in bsizes:
        # simulated average per-request latency for each access pattern
        tsr = disk.avgTime(bs, filesize, read=True, seq=True, depth=depth)
        tsw = disk.avgTime(bs, filesize, read=False, seq=True, depth=depth)
        trr = disk.avgTime(bs, filesize, read=True, seq=False, depth=depth)
        trw = disk.avgTime(bs, filesize, read=False, seq=False, depth=depth)
        # compute the corresponding bandwidths
        bsr = bs * SECOND / tsr
        bsw = bs * SECOND / tsw
        brr = bs * SECOND / trr
        brw = bs * SECOND / trw
        r.printBW(bs, (bsr, bsw, brr, brw))
        r.printIOPS(bs, (bsr, bsw, brr, brw))
        r.printLatency(bs, (tsr, tsw, trr, trw))
# if __name__ == '__main__': cpu = makeCPU([]) print("%s w/%dGB of DDR3-%d RAM" % (cpu.desc, cpu.mem_size / GIG, cpu.mem_speed)) print print(" thread switch %dus" % (cpu.thread_us())) print(" process switch %dus" % (cpu.proc_us())) print(" DMA start/intr %dus" % (cpu.dma_us())) from Report import Report r = Report(("mem-rd", "mem-wrt", "process", "instrs")) print r.printHeading() sizes = [1024, 4096, 128*1024, 1024*1024] for bs in sizes: mem_r = cpu.mem_read(bs) mem_w = cpu.mem_write(bs) mem_p = cpu.process(bs) mem_x = cpu.execute(bs) r.printLatency(bs, (mem_r, mem_w, mem_p, mem_x)) r = Report(("sha-1", "comp", "decomp", "RAID-6")) print r.printHeading() sizes = [1024, 4096, 128*1024, 1024*1024] for bs in sizes: sha_t = cpu.sha_time(bs) sha_c = cpu.sha_cpu(bs)