Example 1
File: mds.py Project: dani-lbnl/lmt
def do_plot(mds, plot, ymax, extra=None):
    fig = plt.figure()
    ax = fig.add_subplot(111)
    steps = mds.Steps.Steps
    values = mds.MDS.Values
    if ymax is None:
        ymax = np.max(values)
    Graph.timeSeries(ax, steps, values, 'b', label='MDS', Ave=True)
    plt.xlabel('time')
    plt.ylabel(r'$ops/sec$')
    if not mds.CPU is None:
        values = mds.CPU.Values
        (handles, labels) = Graph.percent(ax, steps, values, 'k',
                                          label='% CPU', Ave=True)
        if (not handles is None) and (not labels is None):
            plt.legend(handles, labels)
        else:
            print "mds.do_plot(): Warning - Plotting CPU utilization failed."
    else:
        plt.legend()
    plt.setp( ax.get_xticklabels(), rotation=30, horizontalalignment='right')
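    # Convert the first timestamp from seconds since the Unix epoch to a
    # matplotlib date number (days, offset to matplotlib's date origin) so
    # num2date() below can render it as a calendar date for the title.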
    start_time = steps[0]/(24.0*60.0*60.0) + mpl.dates.date2num(datetime.date(1970,1,1))
    plt.title("%s metadata operations for %s" %
              (mds.name,
               mpl.dates.num2date(start_time).strftime("%Y-%m-%d"))
              )
    ax.set_ybound(lower=0, upper=ymax)
    if plot is None:
        plt.show()
    else:
        plt.savefig(plot)
    plt.cla()
Example 2
def show_metadata(args, metadata):
    """
    It would be nice if I could unify this with Graph.mds_plot().
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    steps = metadata.Steps.Steps
    values = metadata.MDS.Values
    Graph.timeSeries(ax, steps, values, 'b', label='ops', Ave=True)
    plt.xlabel('time')
    plt.ylabel(r'$ops/sec$')
    if not metadata.CPU is None:
        values = metadata.CPU.Values
        (handles, labels) = Graph.percent(ax, steps, values, 'k', label='% CPU', Ave=True)
        plt.legend(handles, labels)
    else:
        plt.legend()
    plt.setp( ax.get_xticklabels(), rotation=30, horizontalalignment='right')
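    # begin.sie presumably holds the interval start as seconds-in-epoch,
    # which localtime() turns into a calendar date for the title.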
    dayStr = time.strftime("%Y-%m-%d", time.localtime(metadata.begin.sie))
    plt.title("%s %s metadata" % (dayStr, metadata.name))
    page = StringIO.StringIO()
    plt.savefig(page, format='png')
    page.seek(0)
    plt.cla()
    return(page)
Example 3
def show_bulk(args, bulk):
    """
    It would be nice if I could unify this with Graph.bulk_plot(). That
    would entail using the StringIO function there and handling the
    save/show option in the caller.
    """
    scale = 1024.0*1024.0
    fig = plt.figure()
    ax = fig.add_subplot(111)
    steps = bulk.Steps.Steps
    ybound = 50000.0
    values = bulk.Read.Values/scale
    Graph.timeSeries(ax, steps, values, 'r', label='read', Ave=True)
    values = bulk.Write.Values/scale
    Graph.timeSeries(ax, steps, values, 'b', label='write', Ave=True)
    plt.xlabel('time')
    plt.ylabel(r'$MiB/sec$')
    if not bulk.CPU is None:
        values = bulk.CPU.Values
        (handles, labels) = Graph.percent(ax, steps, values, 'k', label='% CPU', Ave=True)
        plt.legend(handles, labels)
    else:
        plt.legend()
    plt.setp( ax.get_xticklabels(), rotation=30, horizontalalignment='right')
    dayStr = time.strftime("%Y-%m-%d", time.localtime(bulk.begin.sie))
    plt.title("%s %s aggregate I/O" % (dayStr, bulk.name))
    ax.set_ybound(lower = 0, upper = ybound)
    page = StringIO.StringIO()
    plt.savefig(page, format='png')
    page.seek(0)
    plt.cla()
    return(page)
Example 4
def do_plot(bulk, mode, plot, scale):
    fig = plt.figure()
    ax = fig.add_subplot(111)
    steps = bulk.Steps.Steps
    ymax = 0
    if mode == 'Both':
        values = bulk.Bulk.Values/(1024*1024)
        max = np.max(values)
        if max > ymax:
            ymax = max
        Graph.timeSeries(ax, steps, values, 'b', label='read and write', Ave=True)
    elif mode == None:
        values = bulk.Read.Values/(1024*1024)
        max = np.max(values)
        if max > ymax:
            ymax = max
        Graph.timeSeries(ax, steps, values, 'r', label='read', Ave=True)
        values = bulk.Write.Values/(1024*1024)
        max = np.max(values)
        if max > ymax:
            ymax = max
        Graph.timeSeries(ax, steps, values, 'b', label='write', Ave=True)
    elif mode == 'Read':
        values = bulk.Read.Values/(1024*1024)
        max = np.max(values)
        if max > ymax:
            ymax = max
        Graph.timeSeries(ax, steps, values, 'r', label='read', Ave=True)
    else:
        values = bulk.Write.Values/(1024*1024)
        max = np.max(values)
        if max > ymax:
            ymax = max
        Graph.timeSeries(ax, steps, values, 'b', label='write', Ave=True)
    plt.xlabel('time')
    plt.ylabel(r'$MiB/sec$')
    if bulk.CPU != None:
        values = bulk.CPU.Values
        (handles, labels) = Graph.percent(ax, steps, values, 'k', label='% CPU', Ave=True)
        if (handles != None) and (labels != None):
            plt.legend(handles, labels)
        else:
            print "bulk_quick.do_plot(): Warning - Plotting CPU utilization failed."
    else:
        plt.legend()
    plt.setp( ax.get_xticklabels(), rotation=30, horizontalalignment='right')
    dayStr = time.strftime("%Y-%m-%d", time.localtime(bulk.begin.sie))
    plt.title("%s %s aggregate I/O" % (dayStr, bulk.name))
    if scale == None:
        scale = ymax
    ax.set_ybound(lower = 0, upper = scale)
    if plot == None:
        plt.show()
    else:
        plt.savefig(plot)
    plt.cla()
Example 5
def doOSSCPUPlot(args, fsFile):
    if args.progress == True:
        print "OSS CPU plot"
    fsStepsGroup = fsFile['FSStepsGroup']
    fsStepsDataSet = fsStepsGroup['FSStepsDataSet']
    fs=fsStepsDataSet.attrs['fs']
    try:
        host=fsStepsDataSet.attrs['host']
    except:
        host='hopper'
    data = np.zeros(len(fsStepsDataSet))
    ostReadGroup = fsFile['OSTReadGroup']
    ostBulkReadDataSet = ostReadGroup['OSTBulkReadDataSet']
    ost_index = 0
    for ost_name in ostBulkReadDataSet.attrs['OSTNames']:
        data += ostBulkReadDataSet[ost_index,:]
        ost_index += 1
    ostWriteGroup = fsFile['OSTWriteGroup']
    ostBulkWriteDataSet = ostWriteGroup['OSTBulkWriteDataSet']
    ost_index = 0
    for ost_name in ostBulkWriteDataSet.attrs['OSTNames']:
        data += ostBulkWriteDataSet[ost_index,:]
        ost_index += 1
    cpu = np.zeros(len(fsStepsDataSet))
    ossCPUGroup = fsFile['OSSCPUGroup']
    ossCPUDataSet = ossCPUGroup['OSSCPUDataSet']
    oss_index = 0
    for oss_name in ossCPUDataSet.attrs['OSSNames']:
        cpu += ossCPUDataSet[oss_index,:]
        oss_index += 1
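    # oss_index now equals the number of OSS hosts, so this gives the mean
    # CPU utilization across the servers rather than the sum.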
    cpu /= oss_index
    fig = plt.figure()
    ax = fig.add_subplot(111)
    Graph.timeSeries(ax, fsStepsDataSet, data, 'b', label='read plus write', Ave=True)
    plt.xlabel('time')
    plt.ylabel(r'$MiB/sec$')
    (handles, labels) = Graph.percent(ax, fsStepsDataSet, cpu, color='k', label='% CPU', Ave=True)
    plt.setp( ax.get_xticklabels(), rotation=30, horizontalalignment='right')
    plt.title("%s %s aggregate I/O" % (fsStepsDataSet.attrs['day'],
                                       fsStepsDataSet.attrs['fs']))
    ax.set_ybound(lower = 0, upper = 50000)
    if (not handles is None) and (not labels is None):
        plt.legend(handles, labels)
    path  = os.path.dirname(args.file)
    if path == "":
        path = "."
    plt.savefig(path+'/'+host+'_'+fs+"_bulkBothCPU.png")
    plt.cla()
    return
Example 6
def doAction(args, b_sie, e_sie, fsFile):
    fsStepsGroup = fsFile['FSStepsGroup']
    fsStepsDataSet = fsStepsGroup['FSStepsDataSet']
    if (b_sie < fsStepsDataSet[0]) or (b_sie > fsStepsDataSet[-1]):
        print "The beginning timestamp %d is outside the date range from %d to %d" % (b_sie, fsStepsDataSet[0], fsStepsDataSet[-1])
        return
    if (e_sie < fsStepsDataSet[0]) or (e_sie > fsStepsDataSet[-1]):
        print "The beginning timestamp %d is outside the date range from %d to %d" % (e_sie, fsStepsDataSet[0], fsStepsDataSet[-1])
        return
    b_index = find_sie(b_sie, fsStepsDataSet)
    e_index = find_sie(e_sie, fsStepsDataSet)
    fs=fsStepsDataSet.attrs['fs']
    try:
        host=fsStepsDataSet.attrs['host']
    except:
        host='hopper'
    mds = np.zeros(e_index - b_index + 1)
    mdsOpsGroup = fsFile['MDSOpsGroup']
    mdsOpsDataSet = mdsOpsGroup['MDSOpsDataSet']
    op_index = 0
    for op_name in mdsOpsDataSet.attrs['OpNames']:
        mds += mdsOpsDataSet[op_index,b_index:e_index+1]
        op_index += 1
    mdsCPUGroup = fsFile['MDSCPUGroup']
    mdsCPUDataSet = mdsCPUGroup['MDSCPUDataSet']
    cpu = mdsCPUDataSet[b_index:e_index+1]
    np.set_printoptions(threshold='nan')
    #print "cpu: ", cpu
    fig = plt.figure()
    ax = fig.add_subplot(111)
    Graph.timeSeries(ax, fsStepsDataSet[b_index:e_index+1], mds, 'g', label='metadata', Ave=True)
    plt.xlabel('time')
    plt.ylabel(r'$ops/sec$')
    (handles, labels) = Graph.percent(ax, fsStepsDataSet[b_index:e_index+1], cpu, color='k', label='% CPU', Ave=True)
    plt.setp( ax.get_xticklabels(), rotation=30, horizontalalignment='right')
    plt.title("%s %s Metadata Operations" % (fsStepsDataSet.attrs['day'],
                                             fsStepsDataSet.attrs['fs']))
    ax.set_ybound(lower = 0, upper = 120000)
    if (not handles is None) and (not labels is None):
        plt.legend(handles, labels)
    if args.plot is None:
        plt.show()
    else:
        plt.savefig(args.plot)
    plt.cla()
    return
Example 7
def doMDSPlot(args, fsFile):
    if args.progress == True:
        print "MDS plot"
    fsStepsGroup = fsFile['FSStepsGroup']
    fsStepsDataSet = fsStepsGroup['FSStepsDataSet']
    intervals = np.zeros(len(fsStepsDataSet))
    intervals[1:] = np.diff(fsStepsDataSet)
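    # intervals[0] is left at zero, so the first sample contributes nothing
    # to the aggregate-operations sum computed below.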
    fs=fsStepsDataSet.attrs['fs']
    try:
        host=fsStepsDataSet.attrs['host']
    except:
        host='hopper'
    mds = np.zeros(len(fsStepsDataSet))
    mdsOpsGroup = fsFile['MDSOpsGroup']
    mdsOpsDataSet = mdsOpsGroup['MDSOpsDataSet']
    op_index = 0
    for op_name in mdsOpsDataSet.attrs['OpNames']:
        mds += mdsOpsDataSet[op_index,:]
        op_index += 1
    highVals, = np.where(mds > 100000)
    if len(highVals) > 0:
        print "Warning: Exceedingly high values reported for ", highVals
        print fsStepsDataSet[mds > 1000000]
        print mds[mds > 1000000]
    AggregateOps = np.sum(mds*intervals)
    cpu = np.zeros(len(fsStepsDataSet))
    mdsCPUGroup = fsFile['MDSCPUGroup']
    mdsCPUDataSet = mdsCPUGroup['MDSCPUDataSet']
    fig = plt.figure()
    ax = fig.add_subplot(111)
    Graph.timeSeries(ax, fsStepsDataSet, mds, 'b', label='mds', Ave=True)
    plt.xlabel('time')
    plt.ylabel(r'$Ops/sec$')
    (handles, labels) = Graph.percent(ax, fsStepsDataSet, mdsCPUDataSet, color='k', label='% CPU', Ave=True)
    plt.setp( ax.get_xticklabels(), rotation=30, horizontalalignment='right')
    plt.title("%s %s Metadata Operations" % (fsStepsDataSet.attrs['day'],
                                             fsStepsDataSet.attrs['fs']))
    ax.set_ybound(lower = 0, upper = 120000)
    if (not handles is None) and (not labels is None):
        plt.legend(handles, labels)
    path  = os.path.dirname(args.file)
    if path == "":
        path = "."
    plt.savefig(path+'/'+host+'_'+fs+"_MDS+CPU.png")
    plt.cla()
    return(AggregateOps)
Example 8
def do_action(args, bulk, brwfs):
    steps = bulk.Steps.Steps[1:]
    num_steps = len(steps)
    times = bulk.Steps.Diff
    # We don't want to distort the analysis by having some observation
    # intervals longer than the nominal five seconds, so divide by the
    # actual elapsed time to produce rates in MB/s and count/s.
    if not args.ost is None:
        ost = bulk.getOST(ost=args.ost)
        if ost is None:
            print "we didn't find %s" % args.ost
            return
        readMBpS  = ost.Read.Values[1:]
        readMBpS /= (1024*1024)
        writeMBpS = ost.Write.Values[1:]
        writeMBpS /= (1024*1024)
        # no CPU calculation for a single OST
        cpu       = np.zeros_like(readMBpS)
        brwost = brwfs.getOST(ost=ost.name)
        if brwost is None:
            print "we didn't find %s among the brw_stats" % ost.name
            return
        id = brwost.getStatId(args.stat)
        stat_bins = brwost.Bins[brwost.BrwIdDict[id]].Bins
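        # The brw_stats histograms appear to be cumulative counters, so
        # differencing successive samples and dividing by the elapsed time
        # yields per-second counts for each size bin.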
        readHistpS  = np.diff(brwost.Read[id].Values)/times
        readHistpS[np.where(readHistpS.mask==True)] = 0
        writeHistpS = np.diff(brwost.Write[id].Values)/times
        writeHistpS[np.where(writeHistpS.mask==True)] = 0
    else:
        readMBpS  = bulk.Read.Values[1:]
        readMBpS /= (1024*1024)
        writeMBpS = bulk.Write.Values[1:]
        writeMBpS /= (1024*1024)
        cpu       = bulk.CPU.Values[1:]
        readHistpS  = None
        writeHistpS = None
        num_osts = 0
        np.set_printoptions(threshold='nan')
        for oss in bulk.OSSs:
            for ost in oss.OSTs:
                print ost.name
                brwost = brwfs.getOST(ost=ost.name)
                id = brwost.getStatId(args.stat)
                stat_bins = brwost.Bins[brwost.BrwIdDict[id]].Bins
                readpS  = np.diff(brwost.Read[id].Values)/times
                readpS[np.where(readpS.mask==True)] = 0
                if readHistpS is None:
                    readHistpS = np.zeros_like(readpS)
                print readpS
                readHistpS += readpS
                writepS = np.diff(brwost.Write[id].Values)/times
                writepS[np.where(writepS.mask==True)] = 0
                if writeHistpS is None:
                    writeHistpS = np.zeros_like(writepS)
                print writepS
                writeHistpS += writepS
                num_osts += 1
        if (num_osts == 0) or (readHistpS is None) or (writeHistpS is None):
            print "we didn't get anything for the brw_stats data"
            return
        readHistpS /= num_osts
        writeHistpS /= num_osts
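    # Stack the read and write histogram rates and transpose so each row of
    # histSeries is one time step, with the read bins first and the write
    # bins after them.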
    histSeries = np.transpose(np.vstack((readHistpS, writeHistpS)))
    if args.report == True:
        print "%d steps" % len(steps)
    num_bins = len(stat_bins)
    # Now we want to construct the two element distillation of histSeries
    distill = np.zeros((2*num_bins, 4), dtype=np.float64)
    distill[0:num_bins,0] = 1.0
    distill[0:num_bins,1] = stat_bins
    distill[num_bins:2*num_bins,2] = 1.0
    distill[num_bins:2*num_bins,3] = stat_bins
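    # Multiplying histSeries by distill collapses each step's histograms into
    # four features: read I/Os per second, read bytes per second (counts
    # weighted by bin size, assuming the BRW_IOSIZE bins are sizes in bytes),
    # and the same pair for writes.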
    A = np.matrix(histSeries)*np.matrix(distill)
    # This is the result of the two_element_model.py calculation:
    x = np.matrix([1.62589065e-03, 4.39766334e-09, 3.23092722e-03, 1.72900072e-09])
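    # x appears to hold the fitted per-I/O and per-byte cost coefficients of
    # the two-element model, so yhat estimates the fraction of the file
    # system's capacity in use at each time step.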
    yhat = A * np.transpose(x)
    if args.report == True:
        print "yhat:"
        print yhat
    factor = 1
    if args.hist == True:
        if np.max(yhat) > 1.0:
            factor = int(np.max(yhat)) + 1
        utilHist, utilBins = np.histogram(yhat*100.0, bins=100, range=(0.0, 100.0*factor))
        if args.report == True:
            print "utilBins:"
            print utilBins
            print "utilHist:"
            print utilHist
    if args.plot == "noplot":
        return
    fig = plt.figure()
    ax = fig.add_subplot(111)
    Graph.timeSeries(ax, steps, readMBpS, 'r', label='read', Ave=False)
    Graph.timeSeries(ax, steps, writeMBpS, 'b', label='write', Ave=False)
    plt.xlabel('time')
    plt.ylabel(r'$MiB/sec$')
    (handles, labels) = Graph.percent(ax, steps, yhat*100.0, 'k', format="-", label='pct FSU', Ave=False)
    if (not handles is None) and (not labels is None):
        plt.legend(handles, labels)
    plt.setp( ax.get_xticklabels(), rotation=30, horizontalalignment='right')
    dayStr = time.strftime("%Y-%m-%d", time.localtime(bulk.begin.sie))
    plt.title("%s %s File System Utilization" % (dayStr, bulk.name))
    if args.ybound is None:
        ax.set_ybound(lower=0)
    else:
        ax.set_ybound(lower=0, upper=args.ybound)
    if args.plot is None:
        plt.show()
    else:
        plt.savefig(args.plot)
    plt.cla()
    if args.hist == False:
        return
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.bar(utilBins[1:], utilHist, color='k')
    plt.xlabel('pct fs util')
    plt.ylabel('count')
    plt.title('Distribution of File System Utilization')
    if args.plot is None:
        plt.show()
    else:
        plt.savefig('hist_'+args.plot)
    plt.cla()
    return
Example 9
def doAction(args, b_sie, e_sie, fsFile):
    fsStepsGroup = fsFile['FSStepsGroup']
    fsStepsDataSet = fsStepsGroup['FSStepsDataSet']
    if (b_sie < fsStepsDataSet[0]) or (b_sie > fsStepsDataSet[-1]):
        print "The beginning timestamp %d is outside the date range from %d to %d" % (b_sie, fsStepsDataSet[0], fsStepsDataSet[-1])
        return
    if (e_sie < fsStepsDataSet[0]) or (e_sie > fsStepsDataSet[-1]):
        print "The ending timestamp %d is outside the date range from %d to %d" % (e_sie, fsStepsDataSet[0], fsStepsDataSet[-1])
        return
    b_index = find_sie(b_sie, fsStepsDataSet)
    e_index = find_sie(e_sie, fsStepsDataSet)
    #print "data from index %d to %d" % (b_index, e_index)
    fs=fsStepsDataSet.attrs['fs']
    try:
        host=fsStepsDataSet.attrs['host']
    except:
        host='hopper'
    read = np.zeros(e_index - b_index + 1)
    ostReadGroup = fsFile['OSTReadGroup']
    ostBulkReadDataSet = ostReadGroup['OSTBulkReadDataSet']
    ost_index = 0
    if args.report == True:
        np.set_printoptions(threshold='nan')
    for ost_name in ostBulkReadDataSet.attrs['OSTNames']:
        #print "OST %s" % ost_name
        read += ostBulkReadDataSet[ost_index,b_index:e_index+1]
        #print "read:", ostBulkReadDataSet[ost_index,b_index:e_index+1]
        ost_index += 1
    read /= (1024*1024)
    write = np.zeros(e_index - b_index + 1)
    ostWriteGroup = fsFile['OSTWriteGroup']
    ostBulkWriteDataSet = ostWriteGroup['OSTBulkWriteDataSet']
    ost_index = 0
    for ost_name in ostBulkWriteDataSet.attrs['OSTNames']:
        #print "OST %s" % ost_name
        write += ostBulkWriteDataSet[ost_index,b_index:e_index+1]
        #print "write: ", ostBulkWriteDataSet[ost_index,b_index:e_index+1]
        ost_index += 1
    write /= (1024*1024)
    cpu = np.zeros(e_index - b_index + 1)
    ossCPUGroup = fsFile['OSSCPUGroup']
    ossCPUDataSet = ossCPUGroup['OSSCPUDataSet']
    oss_index = 0
    for oss_name in ossCPUDataSet.attrs['OSSNames']:
        cpu += ossCPUDataSet[oss_index,b_index:e_index+1]
        oss_index += 1
    cpu /= oss_index
    if args.report == True:
        print "read:", read
        print "write: ", write
        print "cpu: ", cpu
    if args.plot == "noplot":
        return
    fig = plt.figure()
    ax = fig.add_subplot(111)
    Graph.timeSeries(ax, fsStepsDataSet[b_index:e_index+1], read, 'r', label='read', Ave=args.ave)
    Graph.timeSeries(ax, fsStepsDataSet[b_index:e_index+1], write, 'b', label='write', Ave=args.ave)
    (handles, labels) = ax.get_legend_handles_labels()
    plt.xlabel('time')
    plt.ylabel(r'$MiB/sec$')
    if args.cpu == True:
        (handles, labels) = Graph.percent(ax, fsStepsDataSet[b_index:e_index+1], cpu, color='k', label='% CPU', Ave=args.ave)
    plt.setp( ax.get_xticklabels(), rotation=30, horizontalalignment='right')
    plt.title("%s %s aggregate I/O" % (fsStepsDataSet.attrs['day'],
                                       fsStepsDataSet.attrs['fs']))
    if args.ybound is None:
        ax.set_ybound(lower = 0, upper = 50000)
    else:
        ax.set_ybound(lower = 0, upper = args.ybound)
    if (not handles is None) and (not labels is None):
        plt.legend(handles, labels)
    if args.plot is None:
        plt.show()
    else:
        plt.savefig(args.plot)
    plt.cla()
    return
Example 10
def do_action(args, b_sie, e_sie, fsFile):
    fsStepsGroup = fsFile['FSStepsGroup']
    fsStepsDataSet = fsStepsGroup['FSStepsDataSet']
    if (b_sie < fsStepsDataSet[0]) or (b_sie > fsStepsDataSet[-1]):
        print "The beginning timestamp %d is outside the date range from %d to %d" % (b_sie, fsStepsDataSet[0], fsStepsDataSet[-1])
        return
    if (e_sie < fsStepsDataSet[0]) or (e_sie > fsStepsDataSet[-1]):
        print "The ending timestamp %d is outside the date range from %d to %d" % (e_sie, fsStepsDataSet[0], fsStepsDataSet[-1])
        return
    b_index = find_sie(b_sie, fsStepsDataSet)
    e_index = find_sie(e_sie, fsStepsDataSet)
    if b_index == None:
        print "brw_stats_model_h5lmt.do_action(): Failed to find timestamp index for %d" % b_sie
        return
    if e_index == None:
        print "brw_stats_model_h5lmt.do_action(): Failed to find timestamp index for %d" % e_sie
        return
    fs=fsStepsDataSet.attrs['fs']
    try:
        host=fsStepsDataSet.attrs['host']
    except:
        host='hopper'
    readMBpS = np.zeros(e_index - b_index + 1)
    ostReadGroup = fsFile['OSTReadGroup']
    ostBulkReadDataSet = ostReadGroup['OSTBulkReadDataSet']
    ost_index = 0
    for ost_name in ostBulkReadDataSet.attrs['OSTNames']:
        readMBpS += ostBulkReadDataSet[ost_index,b_index:e_index+1]
        ost_index += 1
    readMBpS /= 1024*1024
    np.set_printoptions(threshold='nan')
    writeMBpS = np.zeros(e_index - b_index + 1)
    ostWriteGroup = fsFile['OSTWriteGroup']
    ostBulkWriteDataSet = ostWriteGroup['OSTBulkWriteDataSet']
    ost_index = 0
    for ost_name in ostBulkWriteDataSet.attrs['OSTNames']:
        writeMBpS += ostBulkWriteDataSet[ost_index,b_index:e_index+1]
        ost_index += 1
    writeMBpS /= 1024*1024
    ostIosizeReadDataSet = ostReadGroup['OSTIosizeReadDataSet']
    if args.stat != ostIosizeReadDataSet.attrs['stat']:
        print "We should only be seeing BRW_IOSIZE statistics not %s" % ostIosizeReadDataSet.attrs['stat']
        return
    bins = ostIosizeReadDataSet.attrs['bins']
    ostIosizeWriteDataSet = ostWriteGroup['OSTIosizeWriteDataSet']
    readHistpS  = None
    writeHistpS = None
    ost_index = 0
    if args.report == True:
        np.set_printoptions(threshold='nan')
    for ost_name in ostBulkReadDataSet.attrs['OSTNames']:
        if args.progress == True:
            print "OST %d: %s" % (ost_index, ost_name)
        readpS = ostIosizeReadDataSet[ost_index,:,b_index:e_index+1]
        if readHistpS is None:
            readHistpS = np.zeros_like(readpS)
        #print readpS
        readHistpS += readpS
        writepS = ostIosizeWriteDataSet[ost_index,:,b_index:e_index+1]
        if writeHistpS is None:
            writeHistpS = np.zeros_like(writepS)
        #print writepS
        writeHistpS += writepS
        ost_index += 1
    if (ost_index == 0) or (readHistpS is None) or (writeHistpS is None):
        print "we didn't get anything for the brw_stats data"
        return
    readHistpS /= ost_index
    writeHistpS /= ost_index
    histSeries = np.transpose(np.vstack((readHistpS, writeHistpS)))
    if args.report == True:
        print "%d steps" % len(fsStepsDataSet)
    num_bins = len(bins)
    # Now we want to construct the two element distillation of histSeries
    distill = np.zeros((2*num_bins, 4), dtype=np.float64)
    distill[0:num_bins,0] = 1.0
    distill[0:num_bins,1] = bins
    distill[num_bins:2*num_bins,2] = 1.0
    distill[num_bins:2*num_bins,3] = bins
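    # A's four columns are read I/Os per second, read bytes per second,
    # write I/Os per second, and write bytes per second for each time step
    # (taking the BRW_IOSIZE bins to be I/O sizes in bytes).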
    A = np.matrix(histSeries)*np.matrix(distill)
    # This is the result of the two_element_model.py calculation:
    x = np.matrix([1.62589065e-03, 4.39766334e-09, 3.23092722e-03, 1.72900072e-09])
    yhat = A * np.transpose(x)
    yhat = np.array(yhat)
    yhat *= 100.0
    if args.report == True:
        print "yhat (percent):"
        print yhat
    if args.hist == True:
        doHist(args, histSeries, yhat, fsStepsDataSet)
    if args.plot == "noplot":
        return
    fig = plt.figure()
    ax = fig.add_subplot(111)
    Graph.timeSeries(ax, fsStepsDataSet[b_index:e_index+1], readMBpS, 'r', label='read', Ave=False)
    Graph.timeSeries(ax, fsStepsDataSet[b_index:e_index+1], writeMBpS, 'b', label='write', Ave=False)
    plt.xlabel('time')
    plt.ylabel(r'$MiB/sec$')
    (handles, labels) = Graph.percent(ax, fsStepsDataSet[b_index:e_index+1], yhat, 'k', label='FSU', Ave=False, ybound=200.0)
    if (not handles is None) and (not labels is None):
        plt.legend(handles, labels)
    plt.setp( ax.get_xticklabels(), rotation=30, horizontalalignment='right')
    dayStr = time.strftime("%Y-%m-%d", time.localtime(fsStepsDataSet[0]))
    plt.title("%s %s File System Utilization" % (dayStr, fs))
    if args.ybound is None:
        ax.set_ybound(lower=0, upper=50000)
    else:
        ax.set_ybound(lower=0, upper=args.ybound)
    if args.plot is None:
        plt.show()
    else:
        plt.savefig(args.plot)
    return
Example 11
def do_plot(B, mode=None, plot=None, ybound=None,
            scale=1024.0*1024.0, withCPU=True):
    if args.lines == True:
        format = '-'
    else:
        format = None
    fig = plt.figure()
    ax = fig.add_subplot(111)
    steps = B.Steps.Steps
    ymax = 0
    np.set_printoptions(threshold='nan')
    if mode == 'Both':
        values = B.Bulk.Values/scale
        max = np.max(values)
        if max > ymax:
            ymax = max
        Graph.timeSeries(ax, steps, values, 'b', label='read and write',
                         Ave=True, format=format)
    elif mode is None:
        values = B.Read.Values/scale
        #print "read: ", values
        max = np.max(values)
        if max > ymax:
            ymax = max
        Graph.timeSeries(ax, steps, values, 'r', label='read',
                         Ave=True, format=format)
        values = B.Write.Values/scale
        #print "write: ", values
        max = np.max(values)
        if max > ymax:
            ymax = max
        Graph.timeSeries(ax, steps, values, 'b', label='write',
                         Ave=True, format=format)
    elif mode == 'Read':
        values = B.Read.Values/scale
        #print "read: ", values
        max = np.max(values)
        if max > ymax:
            ymax = max
        Graph.timeSeries(ax, steps, values, 'r', label='read',
                         Ave=True, format=format)
    else:
        values = B.Write.Values/scale
        #print "write: ", values
        max = np.max(values)
        if max > ymax:
            ymax = max
        Graph.timeSeries(ax, steps, values, 'b', label='write',
                         Ave=True, format=format)
    plt.xlabel('time')
    plt.ylabel(r'$MiB/sec$')
    if (withCPU == True) and (not B.CPU is None):
        values = np.array(B.CPU.Values)
        #values[np.where(values.mask==True)] = 0.0
        (handles, labels) = Graph.percent(ax, steps, values, 'k',
                                          label='pct CPU', Ave=True)
        # insert bogus here for testing
        if (not handles is None) and (not labels is None):
            plt.legend(handles, labels)
        else:
            print "bulk.do_plot(): Warning - Plotting CPU utilization failed."
    else:
        plt.legend()
    plt.setp( ax.get_xticklabels(), rotation=30, horizontalalignment='right')
    dayStr = time.strftime("%Y-%m-%d", time.localtime(B.begin.sie))
    plt.title("%s %s aggregate I/O" % (dayStr, B.name))
    if ybound is None:
        ybound = ymax
    ax.set_ybound(lower = 0, upper = ybound)
    if plot is None:
        plt.show()
    else:
        plt.savefig(plot)
    plt.cla()
Example 12
File: oss.py Project: dani-lbnl/lmt
def do_plot(oss, mode=None, plot=None, ybound=None,
            scale=1024.0*1024.0):
    if args.lines == True:
        format = '-'
    else:
        format = None
    fig = plt.figure()
    ax = fig.add_subplot(111)
    steps = oss.Steps.Steps
    ymax = 0
    if mode == 'Both':
        values = oss.OSS.Values/scale
        max = np.max(values)
        if max > ymax:
            ymax = max
        Graph.timeSeries(ax, steps, values, 'b', label='read and write',
                         Ave=True, format=format)
    elif mode is None:
        values = oss.Read.Values/scale
        max = np.max(values)
        if max > ymax:
            ymax = max
        Graph.timeSeries(ax, steps, values, 'r', label='read',
                         Ave=True, format=format)
        values = oss.Write.Values/scale
        max = np.max(values)
        if max > ymax:
            ymax = max
        Graph.timeSeries(ax, steps, values, 'b', label='write',
                         Ave=True, format=format)
    elif mode == 'Read':
        values = oss.Read.Values/scale
        max = np.max(values)
        if max > ymax:
            ymax = max
        Graph.timeSeries(ax, steps, values, 'r', label='read',
                         Ave=True, format=format)
    else:
        values = oss.Write.Values/scale
        max = np.max(values)
        if max > ymax:
            ymax = max
        Graph.timeSeries(ax, steps, values, 'b', label='write',
                         Ave=True)
    plt.xlabel('time')
    plt.ylabel(r'$MiB/sec$')
    if not oss.CPU is None:
        values = oss.CPU.Values
        (handles, labels) = Graph.percent(ax, steps, values, 'k',
                                          label='% CPU', Ave=True)
        if (not handles is None) and (not labels is None):
            plt.legend(handles, labels)
        else:
            print "oss.do_plot(): Warning - Plotting CPU utilization failed."
    else:
        plt.legend()
    plt.setp( ax.get_xticklabels(), rotation=30, horizontalalignment='right')
    dayStr = time.strftime("%Y-%m-%d", time.localtime(oss.begin.sie))
    plt.title("%s %s aggregate I/O" % (dayStr, oss.name))
    if ybound is None:
        ybound = ymax
    ax.set_ybound(lower = 0, upper = ybound)
    if plot is None:
        plt.show()
    else:
        plt.savefig(plot)
    plt.cla()
Example 13
def doRatePlot(args, fsFile):
    """
    The values in the h5lmt arrays are time series of observations
    of true rates in MB/s. In order to aggregate you need to
    multiply by the interval lengths. Those are hard-coded to
    five seconds in the current implementation, but that is not
    something to necessarily rely on going forward.
    """
    if args.progress == True:
        print "Rate plot"
    fsStepsGroup = fsFile['FSStepsGroup']
    fsStepsDataSet = fsStepsGroup['FSStepsDataSet']
    intervals = np.zeros(len(fsStepsDataSet))
    intervals[1:] = np.diff(fsStepsDataSet)
    fs=fsStepsDataSet.attrs['fs']
    try:
        host=fsStepsDataSet.attrs['host']
    except:
        host='hopper'
    read = np.zeros(len(fsStepsDataSet))
    ostReadGroup = fsFile['OSTReadGroup']
    ostBulkReadDataSet = ostReadGroup['OSTBulkReadDataSet']
    ost_index = 0
    for ost_name in ostBulkReadDataSet.attrs['OSTNames']:
        read += ostBulkReadDataSet[ost_index,:]
        ost_index += 1
    read /= (1024*1024)
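    # read is now a MiB/s rate series; weighting each sample by its interval
    # length and summing integrates it into total MiB read.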
    AggregateRead = np.sum(read*intervals)
    write = np.zeros(len(fsStepsDataSet))
    ostWriteGroup = fsFile['OSTWriteGroup']
    ostBulkWriteDataSet = ostWriteGroup['OSTBulkWriteDataSet']
    ost_index = 0
    for ost_name in ostBulkWriteDataSet.attrs['OSTNames']:
        write += ostBulkWriteDataSet[ost_index,:]
        ost_index += 1
    write /= (1024*1024)
    AggregateWrite = np.sum(write*intervals)
    cpu = np.zeros(len(fsStepsDataSet))
    ossCPUGroup = fsFile['OSSCPUGroup']
    ossCPUDataSet = ossCPUGroup['OSSCPUDataSet']
    oss_index = 0
    for oss_name in ossCPUDataSet.attrs['OSSNames']:
        cpu += ossCPUDataSet[oss_index,:]
        oss_index += 1
    cpu /= oss_index
    fig = plt.figure()
    ax = fig.add_subplot(111)
    Graph.timeSeries(ax, fsStepsDataSet, read, 'r', label='read', Ave=True)
    Graph.timeSeries(ax, fsStepsDataSet, write, 'b', label='write', Ave=True)
    plt.xlabel('time')
    plt.ylabel(r'$MiB/sec$')
    (handles, labels) = Graph.percent(ax, fsStepsDataSet, cpu, color='k', label='% CPU', Ave=True)
    plt.setp( ax.get_xticklabels(), rotation=30, horizontalalignment='right')
    plt.title("%s %s aggregate I/O" % (fsStepsDataSet.attrs['day'],
                                       fsStepsDataSet.attrs['fs']))
    ax.set_ybound(lower = 0, upper = 50000)
    if (not handles is None) and (not labels is None):
        plt.legend(handles, labels)
    else:
        plt.legend()
    path  = os.path.dirname(args.file)
    if path == "":
        path = "."
    plt.savefig(path+'/'+host+'_'+fs+"_bulkRateCPU.png")
    plt.cla()
    return(AggregateRead, AggregateWrite)